Mirror of https://github.com/rclone/rclone.git

Compare commits: v1.46.0...fix-mega-d (67 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 6811629877 |  |
|  | 415eeca6cf |  |
|  | 58d9a3e1b5 |  |
|  | cccadfa7ae |  |
|  | 1b52f8d2a5 |  |
|  | 2078ad68a5 |  |
|  | 368ed9e67d |  |
|  | 7c30993bb7 |  |
|  | 55b9a4ed30 |  |
|  | 118a8b949e |  |
|  | 1d14e30383 |  |
|  | 27714e29c3 |  |
|  | 9f8e1a1dc5 |  |
|  | 1692c6bd0a |  |
|  | d233efbf63 |  |
|  | e9a45a5a34 |  |
|  | f6eb5c6983 |  |
|  | 2bf19787d5 |  |
|  | 0ea3a57ecb |  |
|  | b353c730d8 |  |
|  | 173dfbd051 |  |
|  | e3bceb9083 |  |
|  | 52c6b373cc |  |
|  | 0bc0f62277 |  |
|  | 12c8ee4b4b |  |
|  | 5240f9d1e5 |  |
|  | 997654d77d |  |
|  | f1809451f6 |  |
|  | 84c650818e |  |
|  | c5775cf73d |  |
|  | dca482e058 |  |
|  | 6943169cef |  |
|  | 4fddec113c |  |
|  | 2114fd8f26 |  |
|  | 63bb6de491 |  |
|  | 0a56a168ff |  |
|  | 88e22087a8 |  |
|  | 9404ed703a |  |
|  | c7ecccd5ca |  |
|  | 972e27a861 |  |
|  | 8f4ea77c07 |  |
|  | 61616ba864 |  |
|  | 9ed721a3f6 |  |
|  | 0b9d7fec0c |  |
|  | 240c15883f |  |
|  | 38864adc9c |  |
|  | 5991315990 |  |
|  | 73f0a67d98 |  |
|  | ffe067d6e7 |  |
|  | b5f563fb0f |  |
|  | 9310c7f3e2 |  |
|  | 1c1a8ef24b |  |
|  | 2cfbc2852d |  |
|  | b167d30420 |  |
|  | ec59760d9c |  |
|  | 076d3da825 |  |
|  | c3eecbe933 |  |
|  | d8e5b19ed4 |  |
|  | 43bc381e90 |  |
|  | fb5ee22112 |  |
|  | 35327dad6f |  |
|  | ef5e1909a0 |  |
|  | bca5d8009e |  |
|  | 334f19c974 |  |
|  | 42a5bf1d9f |  |
|  | 71d1890316 |  |
|  | d29c545627 |  |
@@ -20,6 +20,9 @@ linters:
   disable-all: true
 
 issues:
   # Enable some lints excluded by default
   exclude-use-default: false
+
+  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
+  max-per-linter: 0
@@ -8,6 +8,7 @@ go:
 - 1.9.x
 - 1.10.x
 - 1.11.x
+- 1.12.x
 - tip
 go_import_path: github.com/ncw/rclone
 before_install:
@@ -43,7 +44,7 @@ matrix:
   - go: tip
   include:
   - os: osx
-    go: 1.11.x
+    go: 1.12.x
     env: GOTAGS=""
 cache:
   directories:
@@ -55,5 +56,5 @@ deploy:
   on:
     repo: ncw/rclone
     all_branches: true
-    go: 1.11.x
+    go: 1.12.x
     condition: $TRAVIS_PULL_REQUEST == false
Makefile (6 changes)
@@ -11,14 +11,14 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
 BRANCH_PATH :=
 endif
 TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
-NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
+NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
 ifneq ($(TAG),$(LAST_TAG))
     TAG := $(TAG)-beta
 endif
 GO_VERSION := $(shell go version)
 GO_FILES := $(shell go list ./... | grep -v /vendor/ )
-# Run full tests if go >= go1.11
-FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
+# Run full tests if go >= go1.12
+FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 12)')
 BETA_PATH := $(BRANCH_PATH)$(TAG)
 BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
 BETA_UPLOAD_ROOT := memstore:beta-rclone-org
@@ -36,6 +36,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * Hubic [:page_facing_up:](https://rclone.org/hubic/)
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
+* Koofr [:page_facing_up:](https://rclone.org/koofr/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
 * Mega [:page_facing_up:](https://rclone.org/mega/)
 * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
@@ -16,6 +16,7 @@ import (
     _ "github.com/ncw/rclone/backend/http"
     _ "github.com/ncw/rclone/backend/hubic"
     _ "github.com/ncw/rclone/backend/jottacloud"
+    _ "github.com/ncw/rclone/backend/koofr"
     _ "github.com/ncw/rclone/backend/local"
     _ "github.com/ncw/rclone/backend/mega"
     _ "github.com/ncw/rclone/backend/onedrive"
@@ -155,7 +155,7 @@ type Fs struct {
     noAuthClient *http.Client       // unauthenticated http client
     root         string             // the path we are working on
     dirCache     *dircache.DirCache // Map of directory path to directory id
-    pacer        *pacer.Pacer       // pacer for API calls
+    pacer        *fs.Pacer          // pacer for API calls
     trueRootID   string             // ID of true root directory
     tokenRenewer *oauthutil.Renew   // renew the token on expiry
 }
@@ -273,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
         root:         root,
         opt:          *opt,
         c:            c,
-        pacer:        pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
+        pacer:        fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
         noAuthClient: fshttp.NewClient(fs.Config),
     }
     f.features = (&fs.Features{
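The change repeated across nearly every backend in this compare is the switch from the concrete `*pacer.Pacer`, configured with chained `Set...` calls, to the new `*fs.Pacer` wrapper built from functional options. A minimal sketch of the new construction and retry loop, assuming the import paths of this revision (`github.com/ncw/rclone/fs` and `github.com/ncw/rclone/lib/pacer`) and illustrative sleep values:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/lib/pacer"
)

func main() {
	// New-style pacer: fs.NewPacer wraps a rate calculator built with
	// functional options, replacing pacer.New().SetMinSleep(...) etc.
	p := fs.NewPacer(pacer.NewDefault(
		pacer.MinSleep(10*time.Millisecond),
		pacer.MaxSleep(2*time.Second),
		pacer.DecayConstant(2),
	))

	// Call retries the function while it returns (true, err),
	// pacing retries according to the calculator.
	tries := 0
	err := p.Call(func() (bool, error) {
		tries++
		if tries < 3 {
			return true, errors.New("transient error") // ask for a retry
		}
		return false, nil // done
	})
	fmt.Println(tries, err)
}
```

The functional options fix the calculator's parameters at construction time, which presumably is why the mutable chained setters were retired.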
@@ -144,7 +144,7 @@ type Fs struct {
     containerOKMu    sync.Mutex // mutex to protect container OK
     containerOK      bool       // true if we have created the container
     containerDeleted bool       // true if we have deleted the container
-    pacer            *pacer.Pacer // To pace and retry the API calls
+    pacer            *fs.Pacer    // To pace and retry the API calls
     uploadToken      *pacer.TokenDispenser // control concurrency
 }
@@ -347,7 +347,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
         opt:       *opt,
         container: container,
         root:      directory,
-        pacer:     pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
+        pacer:     fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
         uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
         client:      fshttp.NewClient(fs.Config),
     }
@@ -392,6 +392,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
             return nil, errors.New("Container name in SAS URL and container provided in command do not match")
         }
+
         f.container = parts.ContainerName
         containerURL = azblob.NewContainerURL(*u, pipeline)
     } else {
         serviceURL = azblob.NewServiceURL(*u, pipeline)
@@ -1385,16 +1386,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
     blob := o.getBlobReference()
     httpHeaders := azblob.BlobHTTPHeaders{}
     httpHeaders.ContentType = fs.MimeType(o)
-    // Multipart upload doesn't support MD5 checksums at put block calls, hence calculate
-    // MD5 only for PutBlob requests
-    if size < int64(o.fs.opt.UploadCutoff) {
-        if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
-            sourceMD5bytes, err := hex.DecodeString(sourceMD5)
-            if err == nil {
-                httpHeaders.ContentMD5 = sourceMD5bytes
-            } else {
-                fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
-            }
+    // Compute the Content-MD5 of the file, for multiparts uploads it
+    // will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
+    // Note: If multipart, a MD5 checksum will also be computed for each uploaded block
+    // in order to validate its integrity during transport
+    if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
+        sourceMD5bytes, err := hex.DecodeString(sourceMD5)
+        if err == nil {
+            httpHeaders.ContentMD5 = sourceMD5bytes
+        } else {
+            fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
+        }
     }
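The rewritten Update logic always decodes the source's hex MD5 into raw bytes for `BlobHTTPHeaders.ContentMD5`, instead of doing so only below the upload cutoff. A self-contained sketch of just that decoding step; the hash value and the 16-byte length are standard MD5, not rclone-specific:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func main() {
	// rclone hashes are hex strings; the Azure SDK takes the raw
	// 16-byte digest, hence the hex.DecodeString in the hunk above.
	sum := md5.Sum([]byte("hello"))
	hexMD5 := hex.EncodeToString(sum[:])

	raw, err := hex.DecodeString(hexMD5)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %d raw bytes\n", hexMD5, len(raw)) // 16 raw bytes
}
```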
@@ -125,6 +125,14 @@ minimum size.`,
             Help:     `Disable checksums for large (> upload cutoff) files`,
             Default:  false,
             Advanced: true,
+        }, {
+            Name: "download_url",
+            Help: `Custom endpoint for downloads.
+
+This is usually set to a Cloudflare CDN URL as Backblaze offers
+free egress for data downloaded through the Cloudflare network.
+Leave blank if you want to use the endpoint provided by Backblaze.`,
+            Advanced: true,
         }},
     })
 }
@@ -140,6 +148,7 @@ type Options struct {
     UploadCutoff    fs.SizeSuffix `config:"upload_cutoff"`
     ChunkSize       fs.SizeSuffix `config:"chunk_size"`
     DisableCheckSum bool          `config:"disable_checksum"`
+    DownloadURL     string        `config:"download_url"`
 }

 // Fs represents a remote b2 server
@@ -158,7 +167,7 @@ type Fs struct {
     uploadMu     sync.Mutex                  // lock for upload variable
     uploads      []*api.GetUploadURLResponse // result of get upload URL calls
     authMu       sync.Mutex                  // lock for authorizing the account
-    pacer        *pacer.Pacer                // To pace and retry the API calls
+    pacer        *fs.Pacer                   // To pace and retry the API calls
     bufferTokens chan []byte                 // control concurrency of multipart uploads
 }
@@ -242,13 +251,7 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
             fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
             }
         }
-        retryAfterDuration := time.Duration(retryAfter) * time.Second
-        if f.pacer.GetSleep() < retryAfterDuration {
-            fs.Debugf(f, "Setting sleep to %v after error: %v", retryAfterDuration, err)
-            // We set 1/2 the value here because the pacer will double it immediately
-            f.pacer.SetSleep(retryAfterDuration / 2)
-        }
-        return true, err
+        return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
     }
     return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
@@ -354,7 +357,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
         bucket: bucket,
         root:   directory,
         srv:    rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
-        pacer:  pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+        pacer:  fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
     }
     f.features = (&fs.Features{
         ReadMimeType: true,
@@ -1296,9 +1299,17 @@ var _ io.ReadCloser = &openFile{}
 func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
     opts := rest.Opts{
         Method:  "GET",
-        RootURL: o.fs.info.DownloadURL,
         Options: options,
     }
+
+    // Use downloadUrl from backblaze if downloadUrl is not set
+    // otherwise use the custom downloadUrl
+    if o.fs.opt.DownloadURL == "" {
+        opts.RootURL = o.fs.info.DownloadURL
+    } else {
+        opts.RootURL = o.fs.opt.DownloadURL
+    }

     // Download by id if set otherwise by name
     if o.id != "" {
         opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
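Instead of reaching into the pacer to stretch its sleep (`GetSleep`/`SetSleep`), the B2 retry path now wraps the error with `pacer.RetryAfterError` and lets the pacer honour the server's Retry-After hint on the next wait. A sketch of that style, assuming a hypothetical `shouldRetrySketch` helper, the `lib/pacer` import path, and the `RetryAfterError` signature shown in the hunk:

```go
package b2sketch

import (
	"net/http"
	"strconv"
	"time"

	"github.com/ncw/rclone/lib/pacer"
)

// shouldRetrySketch mirrors the new pattern: the Retry-After hint is
// attached to the returned error rather than poked into the pacer,
// and the pacer applies it before the next attempt.
func shouldRetrySketch(resp *http.Response, err error) (bool, error) {
	if resp != nil && resp.StatusCode == http.StatusTooManyRequests {
		if s := resp.Header.Get("Retry-After"); s != "" {
			if secs, perr := strconv.Atoi(s); perr == nil {
				return true, pacer.RetryAfterError(err, time.Duration(secs)*time.Second)
			}
		}
	}
	return false, err
}
```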
@@ -111,7 +111,7 @@ type Fs struct {
     features     *fs.Features       // optional features
     srv          *rest.Client       // the connection to the one drive server
     dirCache     *dircache.DirCache // Map of directory path to directory id
-    pacer        *pacer.Pacer       // pacer for API calls
+    pacer        *fs.Pacer          // pacer for API calls
     tokenRenewer *oauthutil.Renew   // renew the token on expiry
     uploadToken  *pacer.TokenDispenser // control concurrency
 }
@@ -260,7 +260,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
         root:        root,
         opt:         *opt,
         srv:         rest.NewClient(oAuthClient).SetRoot(rootURL),
-        pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+        pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
         uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
     }
     f.features = (&fs.Features{
@@ -186,10 +186,10 @@ func init() {
         },
         Options: []fs.Option{{
             Name: config.ConfigClientID,
-            Help: "Google Application Client Id\nLeave blank normally.",
+            Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
         }, {
             Name: config.ConfigClientSecret,
-            Help: "Google Application Client Secret\nLeave blank normally.",
+            Help: "Google Application Client Secret\nSetting your own is recommended.",
         }, {
             Name: "scope",
             Help: "Scope that rclone should use when requesting access from drive.",
@@ -426,7 +426,7 @@ type Fs struct {
     client           *http.Client       // authorized client
     rootFolderID     string             // the id of the root folder
     dirCache         *dircache.DirCache // Map of directory path to directory id
-    pacer            *pacer.Pacer       // To pace the API calls
+    pacer            *fs.Pacer          // To pace the API calls
     exportExtensions []string           // preferred extensions to download docs
     importMimeTypes  []string           // MIME types to convert to docs
     isTeamDrive      bool               // true if this is a team drive
@@ -676,28 +676,33 @@ func isPowerOfTwo(x int64) bool {
 }

 // add a charset parameter to all text/* MIME types
-func fixMimeType(mimeType string) string {
-    mediaType, param, err := mime.ParseMediaType(mimeType)
+func fixMimeType(mimeTypeIn string) string {
+    if mimeTypeIn == "" {
+        return ""
+    }
+    mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
     if err != nil {
-        return mimeType
+        return mimeTypeIn
     }
-    if strings.HasPrefix(mimeType, "text/") && param["charset"] == "" {
+    mimeTypeOut := mimeTypeIn
+    if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
         param["charset"] = "utf-8"
-        mimeType = mime.FormatMediaType(mediaType, param)
+        mimeTypeOut = mime.FormatMediaType(mediaType, param)
     }
-    return mimeType
+    if mimeTypeOut == "" {
+        panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
+    }
+    return mimeTypeOut
 }

-func fixMimeTypeMap(m map[string][]string) map[string][]string {
-    for _, v := range m {
+func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
+    out = make(map[string][]string, len(in))
+    for k, v := range in {
         for i, mt := range v {
-            fixed := fixMimeType(mt)
-            if fixed == "" {
-                panic(errors.Errorf("unable to fix MIME type %q", mt))
-            }
-            v[i] = fixed
+            v[i] = fixMimeType(mt)
         }
+        out[fixMimeType(k)] = v
     }
-    return m
+    return out
 }

 func isInternalMimeType(mimeType string) bool {
     return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
@@ -789,8 +794,8 @@ func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
 }

 // newPacer makes a pacer configured for drive
-func newPacer(opt *Options) *pacer.Pacer {
-    return pacer.New().SetMinSleep(time.Duration(opt.PacerMinSleep)).SetBurst(opt.PacerBurst).SetPacer(pacer.GoogleDrivePacer)
+func newPacer(opt *Options) *fs.Pacer {
+    return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
 }

 func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
@@ -902,6 +907,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
         ReadMimeType:            true,
         WriteMimeType:           true,
         CanHaveEmptyDirectories: true,
+        ServerSideAcrossConfigs: true,
     }).Fill(f)

     // Create a new authorized Drive client.
@@ -2430,6 +2436,10 @@ func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (
         return req, nil, err
     }
     fs.OpenOptionAddHTTPHeaders(req.Header, options)
+    if o.bytes == 0 {
+        // Don't supply range requests for 0 length objects as they always fail
+        delete(req.Header, "Range")
+    }
     err = o.fs.pacer.Call(func() (bool, error) {
         res, err = o.fs.client.Do(req)
         if err == nil {
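The essential bug fixed in `fixMimeType` is that the `text/` prefix was tested on the raw input rather than on the canonical `mediaType` returned by `mime.ParseMediaType`, so inputs with unusual casing or parameters could slip through. A standalone, stdlib-only distillation of the corrected behaviour (the panic path is dropped here for brevity):

```go
package main

import (
	"fmt"
	"mime"
	"strings"
)

// fixMimeType adds charset=utf-8 to text/* MIME types that lack one,
// matching the logic of the rewritten drive backend helper above.
func fixMimeType(in string) string {
	if in == "" {
		return ""
	}
	mediaType, param, err := mime.ParseMediaType(in)
	if err != nil {
		return in
	}
	out := in
	// Note: the prefix check is on the parsed mediaType, not the raw input.
	if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
		param["charset"] = "utf-8"
		out = mime.FormatMediaType(mediaType, param)
	}
	return out
}

func main() {
	fmt.Println(fixMimeType("text/html"))                // text/html; charset=utf-8
	fmt.Println(fixMimeType("text/html; charset=utf-8")) // unchanged
	fmt.Println(fixMimeType("application/pdf"))          // unchanged
}
```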
@@ -160,7 +160,7 @@ type Fs struct {
     team           team.Client  // for the Teams API
     slashRoot      string       // root with "/" prefix, lowercase
     slashRootSlash string       // root with "/" prefix and postfix, lowercase
-    pacer          *pacer.Pacer // To pace the API calls
+    pacer          *fs.Pacer    // To pace the API calls
     ns             string       // The namespace we are using or "" for none
 }
@@ -209,12 +209,12 @@ func shouldRetry(err error) (bool, error) {
     case auth.RateLimitAPIError:
         if e.RateLimitError.RetryAfter > 0 {
             fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
-            time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
+            err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
         }
         return true, err
     }
+    // Keep old behavior for backward compatibility
-    if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
+    if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
         return true, err
     }
     return fserrors.ShouldRetry(err), err
@@ -273,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     f := &Fs{
         name:  name,
         opt:   *opt,
-        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+        pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
     }
     config := dropbox.Config{
         LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
@@ -16,6 +16,7 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 erro
 */

 import (
+    "context"
     "encoding/base64"
     "encoding/hex"
     "fmt"
@@ -45,6 +46,8 @@ import (
     "golang.org/x/oauth2"
+    "golang.org/x/oauth2/google"
     "google.golang.org/api/googleapi"

+    // NOTE: This API is deprecated
     storage "google.golang.org/api/storage/v1"
 )
@@ -144,6 +147,22 @@ func init() {
             Value: "publicReadWrite",
             Help:  "Project team owners get OWNER access, and all Users get WRITER access.",
         }},
+    }, {
+        Name: "bucket_policy_only",
+        Help: `Access checks should use bucket-level IAM policies.
+
+If you want to upload objects to a bucket with Bucket Policy Only set
+then you will need to set this.
+
+When it is set, rclone:
+
+- ignores ACLs set on buckets
+- ignores ACLs set on objects
+- creates buckets with Bucket Policy Only set
+
+Docs: https://cloud.google.com/storage/docs/bucket-policy-only
+`,
+        Default: false,
     }, {
         Name: "location",
         Help: "Location for the newly created buckets.",
@@ -241,6 +260,7 @@ type Options struct {
     ServiceAccountCredentials string `config:"service_account_credentials"`
     ObjectACL                 string `config:"object_acl"`
     BucketACL                 string `config:"bucket_acl"`
+    BucketPolicyOnly          bool   `config:"bucket_policy_only"`
     Location                  string `config:"location"`
     StorageClass              string `config:"storage_class"`
 }
@@ -256,7 +276,7 @@ type Fs struct {
     bucket     string       // the bucket we are working on
     bucketOKMu sync.Mutex   // mutex to protect bucket OK
     bucketOK   bool         // true if we have created the bucket
-    pacer      *pacer.Pacer // To pace the API calls
+    pacer      *fs.Pacer    // To pace the API calls
 }

 // Object describes a storage object
@@ -381,7 +401,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     } else {
         oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
         if err != nil {
-            return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
+            ctx := context.Background()
+            oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
+            if err != nil {
+                return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
+            }
         }
     }
@@ -395,7 +419,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
         bucket: bucket,
         root:   directory,
         opt:    *opt,
-        pacer:  pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
+        pacer:  fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
     }
     f.features = (&fs.Features{
         ReadMimeType: true,
@@ -709,8 +733,19 @@ func (f *Fs) Mkdir(dir string) (err error) {
         Location:     f.opt.Location,
         StorageClass: f.opt.StorageClass,
     }
+    if f.opt.BucketPolicyOnly {
+        bucket.IamConfiguration = &storage.BucketIamConfiguration{
+            BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
+                Enabled: true,
+            },
+        }
+    }
     err = f.pacer.Call(func() (bool, error) {
-        _, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
+        insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
+        if !f.opt.BucketPolicyOnly {
+            insertBucket.PredefinedAcl(f.opt.BucketACL)
+        }
+        _, err = insertBucket.Do()
         return shouldRetry(err)
     })
     if err == nil {
@@ -976,7 +1011,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
     }
     var newObject *storage.Object
     err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-        newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
+        insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
+        if !o.fs.opt.BucketPolicyOnly {
+            insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
+        }
+        newObject, err = insertObject.Do()
         return shouldRetry(err)
     })
     if err != nil {
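With Bucket Policy Only enabled, Google Cloud Storage rejects requests that also carry ACLs, which is why both `Buckets.Insert` and `Objects.Insert` now attach `PredefinedAcl` only in the legacy path. A condensed sketch of the bucket-creation side, assuming the `google.golang.org/api/storage/v1` types used in the hunks; `makeBucket` is an illustrative name, not an rclone function:

```go
package gcssketch

import storage "google.golang.org/api/storage/v1"

// makeBucket shows the conditional-ACL pattern from the hunks above:
// with Bucket Policy Only, IAM configuration is sent and ACLs are not.
func makeBucket(svc *storage.Service, project, name, acl string, bucketPolicyOnly bool) error {
	bucket := storage.Bucket{Name: name}
	if bucketPolicyOnly {
		bucket.IamConfiguration = &storage.BucketIamConfiguration{
			BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
				Enabled: true,
			},
		}
	}
	insert := svc.Buckets.Insert(project, &bucket)
	if !bucketPolicyOnly {
		insert.PredefinedAcl(acl) // only valid without Bucket Policy Only
	}
	_, err := insert.Do()
	return err
}
```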
@@ -190,7 +190,7 @@ type Fs struct {
     endpointURL  string
     srv          *rest.Client
     apiSrv       *rest.Client
-    pacer        *pacer.Pacer
+    pacer        *fs.Pacer
     tokenRenewer *oauthutil.Renew // renew the token on expiry
 }
@@ -381,6 +381,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     rootIsDir := strings.HasSuffix(root, "/")
     root = parsePath(root)

+    // add jottacloud to the long list of sites that don't follow the oauth spec correctly
+    oauth2.RegisterBrokenAuthHeaderProvider("https://www.jottacloud.com/")
+
     // the oauth client for the api servers needs
     // a filter to fix the grant_type issues (see above)
     baseClient := fshttp.NewClient(fs.Config)
@@ -403,7 +406,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     opt:    *opt,
     srv:    rest.NewClient(oAuthClient).SetRoot(rootURL),
     apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
-    pacer:  pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+    pacer:  fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 }
 f.features = (&fs.Features{
     CaseInsensitive: true,
backend/koofr/koofr.go (new file, 589 lines)
@@ -0,0 +1,589 @@
+package koofr
+
+import (
+    "encoding/base64"
+    "errors"
+    "fmt"
+    "io"
+    "net/http"
+    "path"
+    "strings"
+    "time"
+
+    "github.com/ncw/rclone/fs"
+    "github.com/ncw/rclone/fs/config/configmap"
+    "github.com/ncw/rclone/fs/config/configstruct"
+    "github.com/ncw/rclone/fs/config/obscure"
+    "github.com/ncw/rclone/fs/hash"
+
+    httpclient "github.com/koofr/go-httpclient"
+    koofrclient "github.com/koofr/go-koofrclient"
+)
+
+// Register Fs with rclone
+func init() {
+    fs.Register(&fs.RegInfo{
+        Name:        "koofr",
+        Description: "Koofr",
+        NewFs:       NewFs,
+        Options: []fs.Option{
+            {
+                Name:     "endpoint",
+                Help:     "The Koofr API endpoint to use",
+                Default:  "https://app.koofr.net",
+                Required: true,
+                Advanced: true,
+            }, {
+                Name:     "mountid",
+                Help:     "Mount ID of the mount to use. If omitted, the primary mount is used.",
+                Required: false,
+                Default:  "",
+                Advanced: true,
+            }, {
+                Name:     "user",
+                Help:     "Your Koofr user name",
+                Required: true,
+            }, {
+                Name:       "password",
+                Help:       "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
+                IsPassword: true,
+                Required:   true,
+            },
+        },
+    })
+}
+
+// Options represent the configuration of the Koofr backend
+type Options struct {
+    Endpoint string `config:"endpoint"`
+    MountID  string `config:"mountid"`
+    User     string `config:"user"`
+    Password string `config:"password"`
+}
+
+// A Fs is a representation of a remote Koofr Fs
+type Fs struct {
+    name     string
+    mountID  string
+    root     string
+    opt      Options
+    features *fs.Features
+    client   *koofrclient.KoofrClient
+}
+
+// An Object on the remote Koofr Fs
+type Object struct {
+    fs     *Fs
+    remote string
+    info   koofrclient.FileInfo
+}
+
+func base(pth string) string {
+    rv := path.Base(pth)
+    if rv == "" || rv == "." {
+        rv = "/"
+    }
+    return rv
+}
+
+func dir(pth string) string {
+    rv := path.Dir(pth)
+    if rv == "" || rv == "." {
+        rv = "/"
+    }
+    return rv
+}
+
+// String returns a string representation of the remote Object
+func (o *Object) String() string {
+    return o.remote
+}
+
+// Remote returns the remote path of the Object, relative to Fs root
+func (o *Object) Remote() string {
+    return o.remote
+}
+
+// ModTime returns the modification time of the Object
+func (o *Object) ModTime() time.Time {
+    return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
+}
+
+// Size return the size of the Object in bytes
+func (o *Object) Size() int64 {
+    return o.info.Size
+}
+
+// Fs returns a reference to the Koofr Fs containing the Object
+func (o *Object) Fs() fs.Info {
+    return o.fs
+}
+
+// Hash returns an MD5 hash of the Object
+func (o *Object) Hash(typ hash.Type) (string, error) {
+    if typ == hash.MD5 {
+        return o.info.Hash, nil
+    }
+    return "", nil
+}
+
+// fullPath returns full path of the remote Object (including Fs root)
+func (o *Object) fullPath() string {
+    return o.fs.fullPath(o.remote)
+}
+
+// Storable returns true if the Object is storable
+func (o *Object) Storable() bool {
+    return true
+}
+
+// SetModTime is not supported
+func (o *Object) SetModTime(mtime time.Time) error {
+    return nil
+}
+
+// Open opens the Object for reading
+func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
+    var sOff, eOff int64 = 0, -1
+
+    for _, option := range options {
+        switch x := option.(type) {
+        case *fs.SeekOption:
+            sOff = x.Offset
+        case *fs.RangeOption:
+            sOff = x.Start
+            eOff = x.End
+        default:
+            if option.Mandatory() {
+                fs.Logf(o, "Unsupported mandatory option: %v", option)
+            }
+        }
+    }
+    if sOff == 0 && eOff < 0 {
+        return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
+    }
+    if sOff < 0 {
+        sOff = o.Size() - eOff
+        eOff = o.Size()
+    }
+    if eOff > o.Size() {
+        eOff = o.Size()
+    }
+    span := &koofrclient.FileSpan{
+        Start: sOff,
+        End:   eOff,
+    }
+    return o.fs.client.FilesGetRange(o.fs.mountID, o.fullPath(), span)
+}
+
+// Update updates the Object contents
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+    putopts := &koofrclient.PutFilter{
+        ForceOverwrite:    true,
+        NoRename:          true,
+        IgnoreNonExisting: true,
+    }
+    fullPath := o.fullPath()
+    dirPath := dir(fullPath)
+    name := base(fullPath)
+    err := o.fs.mkdir(dirPath)
+    if err != nil {
+        return err
+    }
+    info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
+    if err != nil {
+        return err
+    }
+    o.info = *info
+    return nil
+}
+
+// Remove deletes the remote Object
+func (o *Object) Remove() error {
+    return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
+}
+
+// Name returns the name of the Fs
+func (f *Fs) Name() string {
+    return f.name
+}
+
+// Root returns the root path of the Fs
+func (f *Fs) Root() string {
+    return f.root
+}
+
+// String returns a string representation of the Fs
+func (f *Fs) String() string {
+    return "koofr:" + f.mountID + ":" + f.root
+}
+
+// Features returns the optional features supported by this Fs
+func (f *Fs) Features() *fs.Features {
+    return f.features
+}
+
+// Precision denotes that setting modification times is not supported
+func (f *Fs) Precision() time.Duration {
+    return fs.ModTimeNotSupported
+}
+
+// Hashes returns a set of hashes are Provided by the Fs
+func (f *Fs) Hashes() hash.Set {
+    return hash.Set(hash.MD5)
+}
+
+// fullPath constructs a full, absolute path from a Fs root relative path,
+func (f *Fs) fullPath(part string) string {
+    return path.Join("/", f.root, part)
+}
+
+// NewFs constructs a new filesystem given a root path and configuration options
+func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
+    opt := new(Options)
+    err = configstruct.Set(m, opt)
+    if err != nil {
+        return nil, err
+    }
+    pass, err := obscure.Reveal(opt.Password)
+    if err != nil {
+        return nil, err
+    }
+    client := koofrclient.NewKoofrClient(opt.Endpoint, false)
+    basicAuth := fmt.Sprintf("Basic %s",
+        base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
+    client.HTTPClient.Headers.Set("Authorization", basicAuth)
+    mounts, err := client.Mounts()
+    if err != nil {
+        return nil, err
+    }
+    f := &Fs{
+        name:   name,
+        root:   root,
+        opt:    *opt,
+        client: client,
+    }
+    f.features = (&fs.Features{
+        CaseInsensitive:         true,
+        DuplicateFiles:          false,
+        BucketBased:             false,
+        CanHaveEmptyDirectories: true,
+    }).Fill(f)
+    for _, m := range mounts {
+        if opt.MountID != "" {
+            if m.Id == opt.MountID {
+                f.mountID = m.Id
+                break
+            }
+        } else if m.IsPrimary {
+            f.mountID = m.Id
+            break
+        }
+    }
+    if f.mountID == "" {
+        if opt.MountID == "" {
+            return nil, errors.New("Failed to find primary mount")
+        }
+        return nil, errors.New("Failed to find mount " + opt.MountID)
+    }
+    rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
+    if err == nil && rootFile.Type != "dir" {
+        f.root = dir(f.root)
+        err = fs.ErrorIsFile
+    } else {
+        err = nil
+    }
+    return f, err
+}
+
+// List returns a list of items in a directory
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+    files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
+    if err != nil {
+        return nil, translateErrorsDir(err)
+    }
+    entries = make([]fs.DirEntry, len(files))
+    for i, file := range files {
+        if file.Type == "dir" {
+            entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
+        } else {
+            entries[i] = &Object{
+                fs:     f,
+                info:   file,
+                remote: path.Join(dir, file.Name),
+            }
+        }
+    }
+    return entries, nil
+}
+
+// NewObject creates a new remote Object for a given remote path
+func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
+    info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
+    if err != nil {
+        return nil, translateErrorsObject(err)
+    }
+    if info.Type == "dir" {
+        return nil, fs.ErrorNotAFile
+    }
+    return &Object{
+        fs:     f,
+        info:   info,
+        remote: remote,
+    }, nil
+}
+
+// Put updates a remote Object
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
+    putopts := &koofrclient.PutFilter{
+        ForceOverwrite:    true,
+        NoRename:          true,
+        IgnoreNonExisting: true,
+    }
+    fullPath := f.fullPath(src.Remote())
+    dirPath := dir(fullPath)
+    name := base(fullPath)
+    err = f.mkdir(dirPath)
+    if err != nil {
+        return nil, err
+    }
+    info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
+    if err != nil {
+        return nil, translateErrorsObject(err)
+    }
+    return &Object{
+        fs:     f,
+        info:   *info,
+        remote: src.Remote(),
+    }, nil
+}
+
+// PutStream updates a remote Object with a stream of unknown size
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+    return f.Put(in, src, options...)
+}
+
+// isBadRequest is a predicate which holds true iff the error returned was
+// HTTP status 400
+func isBadRequest(err error) bool {
+    switch err := err.(type) {
+    case httpclient.InvalidStatusError:
+        if err.Got == http.StatusBadRequest {
+            return true
+        }
+    }
+    return false
+}
+
+// translateErrorsDir translates koofr errors to rclone errors (for a dir
+// operation)
+func translateErrorsDir(err error) error {
+    switch err := err.(type) {
+    case httpclient.InvalidStatusError:
+        if err.Got == http.StatusNotFound {
+            return fs.ErrorDirNotFound
+        }
+    }
+    return err
+}
+
+// translatesErrorsObject translates Koofr errors to rclone errors (for an object operation)
+func translateErrorsObject(err error) error {
+    switch err := err.(type) {
+    case httpclient.InvalidStatusError:
+        if err.Got == http.StatusNotFound {
+            return fs.ErrorObjectNotFound
+        }
+    }
+    return err
+}
+
+// mkdir creates a directory at the given remote path. Creates ancestors if
+// neccessary
+func (f *Fs) mkdir(fullPath string) error {
+    if fullPath == "/" {
+        return nil
+    }
+    info, err := f.client.FilesInfo(f.mountID, fullPath)
+    if err == nil && info.Type == "dir" {
+        return nil
+    }
+    err = translateErrorsDir(err)
+    if err != nil && err != fs.ErrorDirNotFound {
+        return err
+    }
+    dirs := strings.Split(fullPath, "/")
+    parent := "/"
+    for _, part := range dirs {
+        if part == "" {
+            continue
+        }
+        info, err = f.client.FilesInfo(f.mountID, path.Join(parent, part))
+        if err != nil || info.Type != "dir" {
+            err = translateErrorsDir(err)
+            if err != nil && err != fs.ErrorDirNotFound {
+                return err
+            }
+            err = f.client.FilesNewFolder(f.mountID, parent, part)
+            if err != nil && !isBadRequest(err) {
+                return err
+            }
+        }
+        parent = path.Join(parent, part)
+    }
+    return nil
+}
+
+// Mkdir creates a directory at the given remote path. Creates ancestors if
+// necessary
+func (f *Fs) Mkdir(dir string) error {
+    fullPath := f.fullPath(dir)
+    return f.mkdir(fullPath)
+}
+
+// Rmdir removes an (empty) directory at the given remote path
+func (f *Fs) Rmdir(dir string) error {
+    files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
+    if err != nil {
+        return translateErrorsDir(err)
+    }
+    if len(files) > 0 {
+        return fs.ErrorDirectoryNotEmpty
+    }
+    err = f.client.FilesDelete(f.mountID, f.fullPath(dir))
+    if err != nil {
+        return translateErrorsDir(err)
+    }
+    return nil
+}
+
+// Copy copies a remote Object to the given path
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+    dstFullPath := f.fullPath(remote)
+    dstDir := dir(dstFullPath)
+    err := f.mkdir(dstDir)
+    if err != nil {
+        return nil, fs.ErrorCantCopy
+    }
+    err = f.client.FilesCopy((src.(*Object)).fs.mountID,
+        (src.(*Object)).fs.fullPath((src.(*Object)).remote),
+        f.mountID, dstFullPath)
+    if err != nil {
+        return nil, fs.ErrorCantCopy
+    }
+    return f.NewObject(remote)
+}
+
+// Move moves a remote Object to the given path
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+    srcObj := src.(*Object)
+    dstFullPath := f.fullPath(remote)
+    dstDir := dir(dstFullPath)
+    err := f.mkdir(dstDir)
+    if err != nil {
+        return nil, fs.ErrorCantMove
+    }
+    err = f.client.FilesMove(srcObj.fs.mountID,
+        srcObj.fs.fullPath(srcObj.remote), f.mountID, dstFullPath)
+    if err != nil {
+        return nil, fs.ErrorCantMove
+    }
+    return f.NewObject(remote)
+}
+
+// DirMove moves a remote directory to the given path
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+    srcFs := src.(*Fs)
+    srcFullPath := srcFs.fullPath(srcRemote)
+    dstFullPath := f.fullPath(dstRemote)
+    if srcFs.mountID == f.mountID && srcFullPath == dstFullPath {
+        return fs.ErrorDirExists
+    }
+    dstDir := dir(dstFullPath)
+    err := f.mkdir(dstDir)
+    if err != nil {
+        return fs.ErrorCantDirMove
+    }
+    err = f.client.FilesMove(srcFs.mountID, srcFullPath, f.mountID, dstFullPath)
+    if err != nil {
+        return fs.ErrorCantDirMove
+    }
+    return nil
+}
+
+// About reports space usage (with a MB precision)
+func (f *Fs) About() (*fs.Usage, error) {
+    mount, err := f.client.MountsDetails(f.mountID)
+    if err != nil {
+        return nil, err
+    }
+    return &fs.Usage{
+        Total:   fs.NewUsageValue(mount.SpaceTotal * 1024 * 1024),
+        Used:    fs.NewUsageValue(mount.SpaceUsed * 1024 * 1024),
+        Trashed: nil,
+        Other:   nil,
+        Free:    fs.NewUsageValue((mount.SpaceTotal - mount.SpaceUsed) * 1024 * 1024),
+        Objects: nil,
+    }, nil
+}
+
+// Purge purges the complete Fs
+func (f *Fs) Purge() error {
+    err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
+    return err
+}
+
+// linkCreate is a Koofr API request for creating a public link
+type linkCreate struct {
+    Path string `json:"path"`
+}
+
+// link is a Koofr API response to creating a public link
+type link struct {
+    ID               string `json:"id"`
+    Name             string `json:"name"`
+    Path             string `json:"path"`
+    Counter          int64  `json:"counter"`
+    URL              string `json:"url"`
+    ShortURL         string `json:"shortUrl"`
+    Hash             string `json:"hash"`
+    Host             string `json:"host"`
+    HasPassword      bool   `json:"hasPassword"`
+    Password         string `json:"password"`
+    ValidFrom        int64  `json:"validFrom"`
+    ValidTo          int64  `json:"validTo"`
+    PasswordRequired bool   `json:"passwordRequired"`
+}
+
+// createLink makes a Koofr API call to create a public link
+func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link, error) {
+    linkCreate := linkCreate{
+        Path: path,
+    }
+    linkData := link{}
+
+    request := httpclient.RequestData{
+        Method:         "POST",
+        Path:           "/api/v2/mounts/" + mountID + "/links",
+        ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
+        ReqEncoding:    httpclient.EncodingJSON,
+        ReqValue:       linkCreate,
+        RespEncoding:   httpclient.EncodingJSON,
+        RespValue:      &linkData,
+    }
+
+    _, err := c.Request(&request)
+    if err != nil {
+        return nil, err
+    }
+    return &linkData, nil
+}
+
+// PublicLink creates a public link to the remote path
+func (f *Fs) PublicLink(remote string) (string, error) {
+    linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
+    if err != nil {
+        return "", translateErrorsDir(err)
+    }
+    return linkData.ShortURL, nil
+}
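Koofr reports `Modified` as milliseconds since the Unix epoch, which `ModTime` above splits into the seconds and nanoseconds that `time.Unix` expects. A quick stdlib illustration (the timestamp value is made up):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Same conversion as Object.ModTime in the new koofr backend:
	// milliseconds -> (seconds, nanoseconds) for time.Unix.
	var modifiedMs int64 = 1551349200123
	t := time.Unix(modifiedMs/1000, (modifiedMs%1000)*1000*1000)
	fmt.Println(t.UTC()) // retains the 123 ms fraction
}
```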
backend/koofr/koofr_test.go (new file, 14 lines)
@@ -0,0 +1,14 @@
+package koofr_test
+
+import (
+    "testing"
+
+    "github.com/ncw/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+    fstests.Run(t, &fstests.Opt{
+        RemoteName: "TestKoofr:",
+    })
+}
@@ -98,7 +98,7 @@ type Fs struct {
     opt        Options      // parsed config options
     features   *fs.Features // optional features
     srv        *mega.Mega   // the connection to the server
-    pacer      *pacer.Pacer // pacer for API calls
+    pacer      *fs.Pacer    // pacer for API calls
     rootNodeMu sync.Mutex   // mutex for _rootNode
     _rootNode  *mega.Node   // root node - call findRoot to use this
     mkdirMu    sync.Mutex   // used to serialize calls to mkdir / rmdir
@@ -217,7 +217,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     root:  root,
     opt:   *opt,
     srv:   srv,
-    pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+    pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 }
 f.features = (&fs.Features{
     DuplicateFiles: true,
@@ -261,7 +261,7 @@ type Fs struct {
     features     *fs.Features       // optional features
     srv          *rest.Client       // the connection to the one drive server
     dirCache     *dircache.DirCache // Map of directory path to directory id
-    pacer        *pacer.Pacer       // pacer for API calls
+    pacer        *fs.Pacer          // pacer for API calls
     tokenRenewer *oauthutil.Renew   // renew the token on expiry
     driveID      string             // ID to use for querying Microsoft Graph
     driveType    string             // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
@@ -475,7 +475,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     driveID:   opt.DriveID,
     driveType: opt.DriveType,
     srv:       rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
-    pacer:     pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+    pacer:     fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 }
 f.features = (&fs.Features{
     CaseInsensitive: true,
@@ -1488,7 +1488,7 @@ func (o *Object) cancelUploadSession(url string) (err error) {
 // uploadMultipart uploads a file using multipart upload
 func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
     if size <= 0 {
-        panic("size passed into uploadMultipart must be > 0")
+        return nil, errors.New("unknown-sized upload not supported")
     }

     // Create upload session
@@ -1535,7 +1535,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
 // This function will set modtime after uploading, which will create a new version for the remote file
 func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
     if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
-        panic("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
+        return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
     }

     fs.Debugf(o, "Starting singlepart upload")
@@ -65,7 +65,7 @@ type Fs struct {
     opt      Options            // parsed options
     features *fs.Features       // optional features
     srv      *rest.Client       // the connection to the server
-    pacer    *pacer.Pacer       // To pace and retry the API calls
+    pacer    *fs.Pacer          // To pace and retry the API calls
     session  UserSessionInfo    // contains the session data
     dirCache *dircache.DirCache // Map of directory path to directory id
 }
@@ -144,7 +144,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     root:  root,
     opt:   *opt,
     srv:   rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
-    pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+    pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 }

 f.dirCache = dircache.New(root, "0", f)
@@ -161,7 +161,6 @@ type UserInfo struct {
     PublicLinkQuota       int64  `json:"publiclinkquota"`
     Email                 string `json:"email"`
     UserID                int    `json:"userid"`
-    Result                int    `json:"result"`
     Quota                 int64  `json:"quota"`
     TrashRevretentionDays int    `json:"trashrevretentiondays"`
     Premium               bool   `json:"premium"`
@@ -95,7 +95,7 @@ type Fs struct {
     features     *fs.Features       // optional features
     srv          *rest.Client       // the connection to the server
     dirCache     *dircache.DirCache // Map of directory path to directory id
-    pacer        *pacer.Pacer       // pacer for API calls
+    pacer        *fs.Pacer          // pacer for API calls
     tokenRenewer *oauthutil.Renew   // renew the token on expiry
 }
@@ -254,7 +254,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     root:  root,
     opt:   *opt,
     srv:   rest.NewClient(oAuthClient).SetRoot(rootURL),
-    pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+    pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 }
 f.features = (&fs.Features{
     CaseInsensitive: false,
@@ -346,7 +346,7 @@ func init() {
     Help:     "Endpoint for S3 API.\nRequired when using an S3 clone.",
     Provider: "!AWS,IBMCOS,Alibaba",
     Examples: []fs.OptionExample{{
-        Value:    "objects-us-west-1.dream.io",
+        Value:    "objects-us-east-1.dream.io",
         Help:     "Dream Objects endpoint",
         Provider: "Dreamhost",
     }, {
@@ -782,7 +782,7 @@ type Fs struct {
     bucketOKMu    sync.Mutex   // mutex to protect bucket OK
     bucketOK      bool         // true if we have created the bucket
     bucketDeleted bool         // true if we have deleted the bucket
-    pacer         *pacer.Pacer // To pace the API calls
+    pacer         *fs.Pacer    // To pace the API calls
     srv           *http.Client // a plain http client
 }
@@ -1055,7 +1055,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     c:      c,
     bucket: bucket,
     ses:    ses,
-    pacer:  pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
+    pacer:  fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
     srv:    fshttp.NewClient(fs.Config),
 }
 f.features = (&fs.Features{
@@ -427,6 +427,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
 }

+    return NewFsWithConnection(name, root, opt, sshConfig)
+}
+
+// NewFsWithConnection creates a new Fs object from the name and root and a ssh.ClientConfig. It connects to
+// the host specified in the ssh.ClientConfig
+func NewFsWithConnection(name string, root string, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
     f := &Fs{
         name: name,
         root: root,
@@ -216,7 +216,7 @@ type Fs struct {
     containerOK       bool         // true if we have created the container
     segmentsContainer string       // container to store the segments (if any) in
     noCheckContainer  bool         // don't check the container before creating it
-    pacer             *pacer.Pacer // To pace the API calls
+    pacer             *fs.Pacer    // To pace the API calls
 }

 // Object describes a swift object
@@ -401,7 +401,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
     segmentsContainer: container + "_segments",
     root:              directory,
     noCheckContainer:  noCheckContainer,
-    pacer:             pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
+    pacer:             fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
 }
 f.features = (&fs.Features{
     ReadMimeType: true,
@@ -69,7 +69,7 @@ type Prop struct {
     Status       []string  `xml:"DAV: status"`
     Name         string    `xml:"DAV: prop>displayname,omitempty"`
     Type         *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
-    IsCollection *int      `xml:"DAV: prop>iscollection,omitempty"` // this is a Microsoft extension see #2716
+    IsCollection *string   `xml:"DAV: prop>iscollection,omitempty"` // this is a Microsoft extension see #2716
     Size         int64     `xml:"DAV: prop>getcontentlength,omitempty"`
     Modified     Time      `xml:"DAV: prop>getlastmodified,omitempty"`
     Checksums    []string  `xml:"prop>checksums>checksum,omitempty"`
@@ -101,7 +101,7 @@ type Fs struct {
     endpoint    *url.URL      // URL of the host
     endpointURL string        // endpoint as a string
     srv         *rest.Client  // the connection to the one drive server
-    pacer       *pacer.Pacer  // pacer for API calls
+    pacer       *fs.Pacer     // pacer for API calls
     precision   time.Duration // mod time precision
     canStream   bool          // set if can stream
     useOCMtime  bool          // set if can use X-OC-Mtime
@@ -173,9 +173,16 @@ func itemIsDir(item *api.Response) bool {
     fs.Debugf(nil, "Unknown resource type %q/%q on %q", t.Space, t.Local, item.Props.Name)
 }
 // the iscollection prop is a Microsoft extension, but if present it is a reliable indicator
-// if the above check failed - see #2716
+// if the above check failed - see #2716. This can be an integer or a boolean - see #2964
 if t := item.Props.IsCollection; t != nil {
-    return *t != 0
+    switch x := strings.ToLower(*t); x {
+    case "0", "false":
+        return false
+    case "1", "true":
+        return true
+    default:
+        fs.Debugf(nil, "Unknown value %q for IsCollection", x)
+    }
 }
 return false
 }
@@ -311,7 +318,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     endpoint:    u,
     endpointURL: u.String(),
     srv:         rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
-    pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+    pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
     precision:   fs.ModTimeNotSupported,
 }
 f.features = (&fs.Features{
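The WebDAV fix turns `iscollection` from `*int` into `*string` because real servers emit it as either an integer or a boolean (#2716, #2964). A stdlib-only distillation of the tolerant parse:

```go
package main

import (
	"fmt"
	"strings"
)

// isCollection accepts both spellings seen in the wild: integer
// ("0"/"1") and boolean ("false"/"true"), case-insensitively.
func isCollection(v string) bool {
	switch strings.ToLower(v) {
	case "1", "true":
		return true
	}
	return false
}

func main() {
	for _, v := range []string{"0", "1", "false", "TRUE"} {
		fmt.Println(v, "->", isCollection(v))
	}
}
```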
@@ -93,7 +93,7 @@ type Fs struct {
     opt      Options      // parsed options
     features *fs.Features // optional features
     srv      *rest.Client // the connection to the yandex server
-    pacer    *pacer.Pacer // pacer for API calls
+    pacer    *fs.Pacer    // pacer for API calls
     diskRoot string       // root path with "disk:/" container name
 }
@@ -269,7 +269,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     name:  name,
     opt:   *opt,
     srv:   rest.NewClient(oAuthClient).SetRoot(rootURL),
-    pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+    pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 }
 f.setRoot(root)
 f.features = (&fs.Features{
@@ -17,14 +17,18 @@ import (
     "io/ioutil"
     "log"
     "net/http"
+    "net/url"
     "os"
     "os/exec"
+    "path"
     "path/filepath"
     "regexp"
     "runtime"
     "strings"
     "time"

+    "github.com/ncw/rclone/lib/rest"
+    "golang.org/x/net/html"
     "golang.org/x/sys/unix"
 )
@@ -33,6 +37,7 @@ var (
     install = flag.Bool("install", false, "Install the downloaded package using sudo dpkg -i.")
     extract = flag.String("extract", "", "Extract the named executable from the .tar.gz and install into bindir.")
     bindir  = flag.String("bindir", defaultBinDir(), "Directory to install files downloaded with -extract.")
+    useAPI  = flag.Bool("use-api", false, "Use the API for finding the release instead of scraping the page.")
     // Globals
     matchProject = regexp.MustCompile(`^([\w-]+)/([\w-]+)$`)
     osAliases    = map[string][]string{
@@ -209,6 +214,55 @@ func getAsset(project string, matchName *regexp.Regexp) (string, string) {
     return "", ""
 }

+// Get an asset URL and name by scraping the downloads page
+//
+// This doesn't use the API so isn't rate limited when not using GITHUB login details
+func getAssetFromReleasesPage(project string, matchName *regexp.Regexp) (assetURL string, assetName string) {
+    baseURL := "https://github.com/" + project + "/releases"
+    log.Printf("Fetching asset info for %q from %q", project, baseURL)
+    base, err := url.Parse(baseURL)
+    if err != nil {
+        log.Fatalf("URL Parse failed: %v", err)
+    }
+    resp, err := http.Get(baseURL)
+    if err != nil {
+        log.Fatalf("Failed to fetch release info %q: %v", baseURL, err)
+    }
+    defer resp.Body.Close()
+    if resp.StatusCode != http.StatusOK {
+        log.Printf("Error: %s", readBody(resp.Body))
+        log.Fatalf("Bad status %d when fetching %q release info: %s", resp.StatusCode, baseURL, resp.Status)
+    }
+    doc, err := html.Parse(resp.Body)
+    if err != nil {
+        log.Fatalf("Failed to parse web page: %v", err)
+    }
+    var walk func(*html.Node)
+    walk = func(n *html.Node) {
+        if n.Type == html.ElementNode && n.Data == "a" {
+            for _, a := range n.Attr {
+                if a.Key == "href" {
+                    if name := path.Base(a.Val); matchName.MatchString(name) && isOurOsArch(name) {
+                        if u, err := rest.URLJoin(base, a.Val); err == nil {
+                            assetName = name
+                            assetURL = u.String()
+                        }
+                    }
+                    break
+                }
+            }
+        }
+        for c := n.FirstChild; c != nil; c = c.NextSibling {
+            walk(c)
+        }
+    }
+    walk(doc)
+    if assetName == "" || assetURL == "" {
+        log.Fatalf("Didn't find URL in page")
+    }
+    return assetURL, assetName
+}
+
 // isOurOsArch returns true if s contains our OS and our Arch
 func isOurOsArch(s string) bool {
     s = strings.ToLower(s)
@@ -346,7 +400,12 @@ func main() {
     log.Fatalf("Invalid regexp for name %q: %v", nameRe, err)
 }

-    assetURL, assetName := getAsset(project, matchName)
+    var assetURL, assetName string
+    if *useAPI {
+        assetURL, assetName = getAsset(project, matchName)
+    } else {
+        assetURL, assetName = getAssetFromReleasesPage(project, matchName)
+    }
     fileName := filepath.Join(os.TempDir(), assetName)
     getFile(assetURL, fileName)
@@ -36,6 +36,7 @@ docs = [
	"http.md",
	"hubic.md",
	"jottacloud.md",
	"koofr.md",
	"mega.md",
	"azureblob.md",
	"onedrive.md",
@@ -29,7 +29,7 @@ github-release release \
	--name "rclone" \
	--description "Rclone - rsync for cloud storage. Sync files to and from many cloud storage providers."

for build in `ls build | grep -v current`; do
for build in `ls build | grep -v current | grep -v testbuilds`; do
	echo "Uploading ${build}"
	base="${build%.*}"
	parts=(${base//-/ })
@@ -341,8 +341,7 @@ func initConfig() {
	configflags.SetFlags()

	// Load filters
	var err error
	filter.Active, err = filter.NewFilter(&filterflags.Opt)
	err := filterflags.Reload()
	if err != nil {
		log.Fatalf("Failed to load filters: %v", err)
	}
@@ -7,8 +7,13 @@ import (
	"github.com/spf13/cobra"
)

var (
	createEmptySrcDirs = false
)

func init() {
	cmd.Root.AddCommand(commandDefintion)
	commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy")
}

var commandDefintion = &cobra.Command{

@@ -69,7 +74,7 @@ changed recently very efficiently like this:
	fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
	cmd.Run(true, true, command, func() error {
		if srcFileName == "" {
			return sync.CopyDir(fdst, fsrc)
			return sync.CopyDir(fdst, fsrc, createEmptySrcDirs)
		}
		return operations.CopyFile(fdst, fsrc, srcFileName, srcFileName)
	})
@@ -48,7 +48,7 @@ destination.
	fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
	cmd.Run(true, true, command, func() error {
		if srcFileName == "" {
			return sync.CopyDir(fdst, fsrc)
			return sync.CopyDir(fdst, fsrc, false)
		}
		return operations.CopyFile(fdst, fsrc, dstFileName, srcFileName)
	})
@@ -37,7 +37,7 @@ documentation, changelog and configuration walkthroughs.

const (
	bashCompletionFunc = `
__custom_func() {
__rclone_custom_func() {
	if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
		local cur cword prev words
		if declare -F _init_completion > /dev/null; then

@@ -45,7 +45,7 @@ __custom_func() {
		else
			__rclone_init_completion -n : || return
		fi
		if [[ $cur =~ ^[[:alnum:]]*$ ]]; then
		if [[ $cur =~ ^[[:alnum:]_]*$ ]]; then
			local remote
			while IFS= read -r remote; do
				[[ $remote != $cur* ]] || COMPREPLY+=("$remote")

@@ -54,7 +54,7 @@ __custom_func() {
			local paths=("$cur"*)
			[[ ! -f ${paths[0]} ]] || COMPREPLY+=("${paths[@]}")
		fi
	elif [[ $cur =~ ^[[:alnum:]]+: ]]; then
	elif [[ $cur =~ ^[[:alnum:]_]+: ]]; then
		local path=${cur#*:}
		if [[ $path == */* ]]; then
			local prefix=${path%/*}
123 cmd/info/info.go

@@ -21,11 +21,22 @@ import (
	"github.com/spf13/cobra"
)

type position int

const (
	positionMiddle position = 1 << iota
	positionLeft
	positionRight
	positionNone position = 0
	positionAll  position = positionRight<<1 - 1
)

var (
	checkNormalization bool
	checkControl       bool
	checkLength        bool
	checkStreaming     bool
	positionList       = []position{positionMiddle, positionLeft, positionRight}
)

func init() {
@@ -59,7 +70,7 @@ a bit of go code for each one.
type results struct {
	f                    fs.Fs
	mu                   sync.Mutex
	charNeedsEscaping    map[rune]bool
	stringNeedsEscaping  map[string]position
	maxFileLength        int
	canWriteUnnormalized bool
	canReadUnnormalized  bool

@@ -69,8 +80,8 @@ type results struct {

func newResults(f fs.Fs) *results {
	return &results{
		f:                 f,
		charNeedsEscaping: make(map[rune]bool),
		f:                   f,
		stringNeedsEscaping: make(map[string]position),
	}
}

@@ -79,13 +90,13 @@ func (r *results) Print() {
	fmt.Printf("// %s\n", r.f.Name())
	if checkControl {
		escape := []string{}
		for c, needsEscape := range r.charNeedsEscaping {
			if needsEscape {
		for c, needsEscape := range r.stringNeedsEscaping {
			if needsEscape != positionNone {
				escape = append(escape, fmt.Sprintf("0x%02X", c))
			}
		}
		sort.Strings(escape)
		fmt.Printf("charNeedsEscaping = []byte{\n")
		fmt.Printf("stringNeedsEscaping = []byte{\n")
		fmt.Printf("\t%s\n", strings.Join(escape, ", "))
		fmt.Printf("}\n")
	}
@@ -130,20 +141,45 @@ func (r *results) checkUTF8Normalization() {
	}
}

// check we can write file with the rune passed in
func (r *results) checkChar(c rune) {
	fs.Infof(r.f, "Writing file 0x%02X", c)
	path := fmt.Sprintf("0x%02X-%c-", c, c)
	_, err := r.writeFile(path)
	escape := false
	if err != nil {
		fs.Infof(r.f, "Couldn't write file 0x%02X", c)
		escape = true
	} else {
		fs.Infof(r.f, "OK writing file 0x%02X", c)
func (r *results) checkStringPositions(s string) {
	fs.Infof(r.f, "Writing position file 0x%0X", s)
	positionError := positionNone

	for _, pos := range positionList {
		path := ""
		switch pos {
		case positionMiddle:
			path = fmt.Sprintf("position-middle-%0X-%s-", s, s)
		case positionLeft:
			path = fmt.Sprintf("%s-position-left-%0X", s, s)
		case positionRight:
			path = fmt.Sprintf("position-right-%0X-%s", s, s)
		default:
			panic("invalid position: " + pos.String())
		}
		_, writeErr := r.writeFile(path)
		if writeErr != nil {
			fs.Infof(r.f, "Writing %s position file 0x%0X Error: %s", pos.String(), s, writeErr)
		} else {
			fs.Infof(r.f, "Writing %s position file 0x%0X OK", pos.String(), s)
		}
		obj, getErr := r.f.NewObject(path)
		if getErr != nil {
			fs.Infof(r.f, "Getting %s position file 0x%0X Error: %s", pos.String(), s, getErr)
		} else {
			if obj.Size() != 50 {
				fs.Infof(r.f, "Getting %s position file 0x%0X Invalid Size: %d", pos.String(), s, obj.Size())
			} else {
				fs.Infof(r.f, "Getting %s position file 0x%0X OK", pos.String(), s)
			}
		}
		if writeErr != nil || getErr != nil {
			positionError += pos
		}
	}

	r.mu.Lock()
	r.charNeedsEscaping[c] = escape
	r.stringNeedsEscaping[s] = positionError
	r.mu.Unlock()
}
@@ -157,19 +193,28 @@ func (r *results) checkControls() {
	}
	var wg sync.WaitGroup
	for i := rune(0); i < 128; i++ {
		s := string(i)
		if i == 0 || i == '/' {
			// We're not even going to check NULL or /
			r.charNeedsEscaping[i] = true
			r.stringNeedsEscaping[s] = positionAll
			continue
		}
		wg.Add(1)
		c := i
		go func() {
		go func(s string) {
			defer wg.Done()
			token := <-tokens
			r.checkChar(c)
			r.checkStringPositions(s)
			tokens <- token
		}()
		}(s)
	}
	for _, s := range []string{"＼", "\xBF", "\xFE"} {
		wg.Add(1)
		go func(s string) {
			defer wg.Done()
			token := <-tokens
			r.checkStringPositions(s)
			tokens <- token
		}(s)
	}
	wg.Wait()
	fs.Infof(r.f, "Done trying to create control character file names")
@@ -268,3 +313,35 @@ func readInfo(f fs.Fs) error {
	r.Print()
	return nil
}

func (e position) String() string {
	switch e {
	case positionNone:
		return "none"
	case positionAll:
		return "all"
	}
	var buf bytes.Buffer
	if e&positionMiddle != 0 {
		buf.WriteString("middle")
		e &= ^positionMiddle
	}
	if e&positionLeft != 0 {
		if buf.Len() != 0 {
			buf.WriteRune(',')
		}
		buf.WriteString("left")
		e &= ^positionLeft
	}
	if e&positionRight != 0 {
		if buf.Len() != 0 {
			buf.WriteRune(',')
		}
		buf.WriteString("right")
		e &= ^positionRight
	}
	if e != positionNone {
		panic("invalid position")
	}
	return buf.String()
}
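The position values above are bit flags, so failures at several positions can be stored in one value and decoded again by String(). A minimal standalone sketch of how the flags combine (illustrative only, not part of the diff):

```go
package main

import "fmt"

type position int

const (
	positionMiddle position = 1 << iota // 1
	positionLeft                        // 2
	positionRight                       // 4
	positionNone position = 0
	positionAll  position = positionRight<<1 - 1 // 7 == middle|left|right
)

func main() {
	// Record a failure in the middle and right positions only.
	e := positionMiddle | positionRight
	fmt.Println(e&positionLeft != 0)  // false - left position worked
	fmt.Println(e&positionRight != 0) // true - right position failed
	fmt.Println(e == positionAll)     // false - not every position failed
}
```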
40 cmd/info/process.sh Normal file

@@ -0,0 +1,40 @@
set -euo pipefail

for f in info-*.log; do
	for pos in middle left right; do
		egrep -oe " Writing $pos position file [^ ]* \w+" $f | sort | cut -d' ' -f 7 > $f.write_$pos
		egrep -oe " Getting $pos position file [^ ]* \w+" $f | sort | cut -d' ' -f 7 > $f.get_$pos
	done
	{
		echo "${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}"
		echo "Write\tWrite\tWrite\tGet\tGet\tGet"
		echo "Mid\tLeft\tRight\tMid\tLeft\tRight"
		paste $f.write_{middle,left,right} $f.get_{middle,left,right}
	} > $f.csv
done

for f in info-*.list; do
	for pos in middle left right; do
		cat $f | perl -lne 'print $1 if /^\s+[0-9]+\s+(.*)/' | grep -a "position-$pos-" | sort > $f.$pos
	done
	{
		echo "${${f%.list}#info-}\t${${f%.list}#info-}\t${${f%.list}#info-}"
		echo "List\tList\tList"
		echo "Mid\tLeft\tRight"
		for e in 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F BF EFBCBC FE; do
			echo -n $(perl -lne 'print "'$e'-$1" if /^position-middle-'$e'-(.*)-/' $f.middle | tr -d "\t\r" | grep -a . || echo Miss)
			echo -n "\t"
			echo -n $(perl -lne 'print "'$e'-$1" if /^(.*)-position-left-'$e'/' $f.left | tr -d "\t\r" | grep -a . || echo Miss)
			echo -n "\t"
			echo $(perl -lne 'print "'$e'-$1" if /^position-right-'$e'-(.*)/' $f.right | tr -d "\t\r" | grep -a . || echo Miss)
			# echo -n $(grep -a "position-middle-$e-" $f.middle | tr -d "\t\r" || echo Miss)"\t"
			# echo -n $(grep -a "position-left-$e" $f.left | tr -d "\t\r" || echo Miss)"\t"
			# echo $(grep -a "position-right-$e-" $f.right | tr -d "\t\r" || echo Miss)
		done
	} > $f.csv
done

for f in info-*.list; do
	paste ${f%.list}.log.csv $f.csv > ${f%.list}.full.csv
done
paste *.full.csv > info-complete.csv
3 cmd/info/test.cmd Normal file

@@ -0,0 +1,3 @@
rclone.exe purge info
rclone.exe info -vv info > info-LocalWindows.log 2>&1
rclone.exe ls -vv info > info-LocalWindows.list 2>&1
43 cmd/info/test.sh Executable file

@@ -0,0 +1,43 @@
#!/usr/bin/env zsh
#
# example usage:
# $GOPATH/src/github.com/ncw/rclone/cmd/info/test.sh --list | \
#   parallel -P20 $GOPATH/src/github.com/ncw/rclone/cmd/info/test.sh

export PATH=$GOPATH/src/github.com/ncw/rclone:$PATH

typeset -A allRemotes
allRemotes=(
	TestAmazonCloudDrive '--low-level-retries=2 --checkers=5'
	TestB2 ''
	TestBox ''
	TestDrive '--tpslimit=5'
	TestCrypt ''
	TestDropbox '--checkers=1'
	TestJottacloud ''
	TestMega ''
	TestOneDrive ''
	TestOpenDrive '--low-level-retries=2 --checkers=5'
	TestPcloud '--low-level-retries=2 --timeout=15s'
	TestS3 ''
	Local ''
)

set -euo pipefail

if [[ $# -eq 0 ]]; then
	set -- ${(k)allRemotes[@]}
elif [[ $1 = --list ]]; then
	printf '%s\n' ${(k)allRemotes[@]}
	exit 0
fi

for remote; do
	dir=$remote:infotest
	if [[ $remote = Local ]]; then
		dir=infotest
	fi
	rclone purge $dir || :
	rclone info -vv $dir ${=allRemotes[$remote]} &> info-$remote.log
	rclone ls -vv $dir &> info-$remote.list
done
@@ -10,7 +10,6 @@ import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fs/operations"
	"github.com/ncw/rclone/fs/walk"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

@@ -67,8 +66,10 @@ output:
s - size
t - modification time
h - hash
i - ID of object if known
i - ID of object
o - Original ID of underlying object
m - MimeType of object if known
e - encrypted name

So if you wanted the path, size and modification time, you would use
--format "pst", or maybe --format "tsp" to put the path last.
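For instance, to print modification time, size and path using the format characters documented above (remote and path here are placeholders):

    rclone lsf --format "tsp" remote:path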
@@ -161,6 +162,10 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
	list.SetCSV(csv)
	list.SetDirSlash(dirSlash)
	list.SetAbsolute(absolute)
	var opt = operations.ListJSONOpt{
		NoModTime: true,
		Recurse:   recurse,
	}

	for _, char := range format {
		switch char {

@@ -168,38 +173,38 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
			list.AddPath()
		case 't':
			list.AddModTime()
			opt.NoModTime = false
		case 's':
			list.AddSize()
		case 'h':
			list.AddHash(hashType)
			opt.ShowHash = true
		case 'i':
			list.AddID()
		case 'm':
			list.AddMimeType()
		case 'e':
			list.AddEncrypted()
			opt.ShowEncrypted = true
		case 'o':
			list.AddOrigID()
			opt.ShowOrigIDs = true
		default:
			return errors.Errorf("Unknown format character %q", char)
		}
	}

	return walk.Walk(fsrc, "", false, operations.ConfigMaxDepth(recurse), func(path string, entries fs.DirEntries, err error) error {
		if err != nil {
			fs.CountError(err)
			fs.Errorf(path, "error listing: %v", err)
			return nil
		}
		for _, entry := range entries {
			_, isDir := entry.(fs.Directory)
			if isDir {
				if filesOnly {
					continue
				}
			} else {
				if dirsOnly {
					continue
				}
	return operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error {
		if item.IsDir {
			if filesOnly {
				return nil
			}
		} else {
			if dirsOnly {
				return nil
			}
			_, _ = fmt.Fprintln(out, list.Format(entry))
		}
		_, _ = fmt.Fprintln(out, list.Format(item))
		return nil
	})
}
@@ -10,11 +10,13 @@ import (
// Globals
var (
	deleteEmptySrcDirs = false
	createEmptySrcDirs = false
)

func init() {
	cmd.Root.AddCommand(commandDefintion)
	commandDefintion.Flags().BoolVarP(&deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
	commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move")
}

var commandDefintion = &cobra.Command{

@@ -52,7 +54,7 @@ can speed transfers up greatly.
	fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
	cmd.Run(true, true, command, func() error {
		if srcFileName == "" {
			return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs)
			return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs, createEmptySrcDirs)
		}
		return operations.MoveFile(fdst, fsrc, srcFileName, srcFileName)
	})

@@ -52,7 +52,7 @@ transfer.

	cmd.Run(true, true, command, func() error {
		if srcFileName == "" {
			return sync.MoveDir(fdst, fsrc, false)
			return sync.MoveDir(fdst, fsrc, false, false)
		}
		return operations.MoveFile(fdst, fsrc, dstFileName, srcFileName)
	})
@@ -10,6 +10,7 @@ import (
	"sort"
	"strings"

	runewidth "github.com/mattn/go-runewidth"
	"github.com/ncw/rclone/cmd"
	"github.com/ncw/rclone/cmd/ncdu/scan"
	"github.com/ncw/rclone/fs"

@@ -122,7 +123,7 @@ func Printf(x, y int, fg, bg termbox.Attribute, format string, args ...interface
func Line(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, msg string) {
	for _, c := range msg {
		termbox.SetCell(x, y, c, fg, bg)
		x++
		x += runewidth.RuneWidth(c)
		if x >= xmax {
			return
		}
@@ -6,8 +6,13 @@ import (
	"github.com/spf13/cobra"
)

var (
	createEmptySrcDirs = false
)

func init() {
	cmd.Root.AddCommand(commandDefintion)
	commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after sync")
}

var commandDefintion = &cobra.Command{

@@ -39,7 +44,7 @@ go there.
		cmd.CheckArgs(2, 2, command, args)
		fsrc, fdst := cmd.NewFsSrcDst(args)
		cmd.Run(true, true, command, func() error {
			return sync.Sync(fdst, fsrc)
			return sync.Sync(fdst, fsrc, createEmptySrcDirs)
		})
	},
}
@@ -29,6 +29,7 @@ Rclone is a command line program to sync files and directories to and from:
* {{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
* {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
* {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
* {{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
* {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
* {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
* {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}
@@ -236,3 +236,12 @@ Contributors
* weetmuts <oehrstroem@gmail.com>
* Jonathan <vanillajonathan@users.noreply.github.com>
* James Carpenter <orbsmiv@users.noreply.github.com>
* Vince <vince0villamora@gmail.com>
* Nestar47 <47841759+Nestar47@users.noreply.github.com>
* Six <brbsix@gmail.com>
* Alexandru Bumbacea <alexandru.bumbacea@booking.com>
* calisro <robert.calistri@gmail.com>
* Dr.Rx <david.rey@nventive.com>
* marcintustin <marcintustin@users.noreply.github.com>
* jaKa Močnik <jaka@koofr.net>
* Fionera <fionera@fionera.de>
@@ -1,7 +1,7 @@
---
title: "Documentation"
description: "Rclone Usage"
date: "2015-06-06"
date: "2019-02-25"
---

Configure

@@ -34,6 +34,7 @@ See the following for detailed instructions for
* [HTTP](/http/)
* [Hubic](/hubic/)
* [Jottacloud](/jottacloud/)
* [Koofr](/koofr/)
* [Mega](/mega/)
* [Microsoft Azure Blob Storage](/azureblob/)
* [Microsoft OneDrive](/onedrive/)
@@ -98,7 +99,7 @@ The main rclone commands with most used first
* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output shell completion scripts for rclone.
* [rclone gendocs](/commands/rclone_gendocs/) - Output markdown docs for rclone to the directory supplied.
* [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file.
* [rclone mount](/commands/rclone_mount/) - Mount the remote as a mountpoint. **EXPERIMENTAL**
* [rclone mount](/commands/rclone_mount/) - Mount the remote as a mountpoint.
* [rclone moveto](/commands/rclone_moveto/) - Move file or directory from source to dest.
* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone.conf
* [rclone cryptcheck](/commands/rclone_cryptcheck/) - Check the integrity of a crypted remote.
@@ -170,11 +171,24 @@ should be the name or prefix of a backend (the `type` in the config
file) and all the configuration for the backend should be provided on
the command line (or in environment variables).

Eg
Here are some examples:

    rclone lsd --http-url https://pub.rclone.org :http:

Which lists all the directories in `pub.rclone.org`.
To list all the directories in the root of `https://pub.rclone.org/`.

    rclone lsf --http-url https://example.com :http:path/to/dir

To list files and directories in `https://example.com/path/to/dir/`

    rclone copy --http-url https://example.com :http:path/to/dir /tmp/dir

To copy files and directories in `https://example.com/path/to/dir` to `/tmp/dir`.

    rclone copy --sftp-host example.com :sftp:path/to/dir /tmp/dir

To copy files and directories from `example.com` in the relative
directory `path/to/dir` to `/tmp/dir` using sftp.
Quoting and the shell
---------------------

@@ -670,6 +684,24 @@ uploaded compressed files.
There is no need to set this in normal operation, and doing so will
decrease the network transfer efficiency of rclone.

### --no-traverse ###

The `--no-traverse` flag controls whether the destination file system
is traversed when using the `copy` or `move` commands.
`--no-traverse` is not compatible with `sync` and will be ignored if
you supply it with `sync`.

If you are only copying a small number of files (or are filtering most
of the files) and/or have a large number of files on the destination
then `--no-traverse` will stop rclone listing the destination and save
time.

However, if you are copying a large number of files, especially if you
are doing a copy where lots of the files under consideration haven't
changed and won't need copying then you shouldn't use `--no-traverse`.

See [rclone copy](/commands/rclone_copy/) for an example of how to use it.
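An illustrative invocation of the flag described above (the local path and remote name are placeholders):

    rclone copy --no-traverse /path/to/local remote:backup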
### --no-update-modtime ###

When using this flag, rclone won't update modification times of remote

@@ -991,6 +1023,47 @@ with this setting.

Prints the version number
SSL/TLS options
---------------

The outgoing SSL/TLS connections rclone makes can be controlled with
these options. For example this can be very useful with the HTTP or
WebDAV backends. Rclone HTTP servers have their own set of
configuration for SSL/TLS which you can find in their documentation.

### --ca-cert string

This loads the PEM encoded certificate authority certificate and uses
it to verify the certificates of the servers rclone connects to.

If you have generated certificates signed with a local CA then you
will need this flag to connect to servers using those certificates.

### --client-cert string

This loads the PEM encoded client side certificate.

This is used for [mutual TLS authentication](https://en.wikipedia.org/wiki/Mutual_authentication).

The `--client-key` flag is required too when using this.

### --client-key string

This loads the PEM encoded client side private key used for mutual TLS
authentication. Used in conjunction with `--client-cert`.

### --no-check-certificate=true/false ###

`--no-check-certificate` controls whether a client verifies the
server's certificate chain and host name.
If `--no-check-certificate` is true, TLS accepts any certificate
presented by the server and any host name in that certificate.
In this mode, TLS is susceptible to man-in-the-middle attacks.

This option defaults to `false`.

**This should be used only for testing.**
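Putting the three certificate flags together, a hypothetical invocation against a server signed by a local CA with mutual TLS might look like this (the file names are placeholders):

    rclone ls --ca-cert ca.pem --client-cert client.pem --client-key client.key remote: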
Configuration Encryption
------------------------
Your configuration file contains information for logging in to

@@ -1147,36 +1220,6 @@ use it.

Write memory profile to file. This can be analysed with `go tool pprof`.

### --no-check-certificate=true/false ###

`--no-check-certificate` controls whether a client verifies the
server's certificate chain and host name.
If `--no-check-certificate` is true, TLS accepts any certificate
presented by the server and any host name in that certificate.
In this mode, TLS is susceptible to man-in-the-middle attacks.

This option defaults to `false`.

**This should be used only for testing.**

### --no-traverse ###

The `--no-traverse` flag controls whether the destination file system
is traversed when using the `copy` or `move` commands.
`--no-traverse` is not compatible with `sync` and will be ignored if
you supply it with `sync`.

If you are only copying a small number of files (or are filtering most
of the files) and/or have a large number of files on the destination
then `--no-traverse` will stop rclone listing the destination and save
time.

However, if you are copying a large number of files, especially if you
are doing a copy where lots of the files under consideration haven't
changed and won't need copying then you shouldn't use `--no-traverse`.

See [rclone copy](/commands/rclone_copy/) for an example of how to use it.

Filtering
---------
@@ -854,6 +854,15 @@ The most likely cause of this is the duplicated file issue above - run
`rclone dedupe` and check your logs for duplicate object or directory
messages.

This can also be caused by a delay/caching on google drive's end when
comparing directory listings. Specifically with team drives used in
combination with --fast-list. Files that were uploaded recently may
not appear on the directory list sent to rclone when using --fast-list.

Waiting a moderate period of time between attempts (estimated to be
approximately 1 hour) and/or not using --fast-list both seem to be
effective in preventing the problem.

### Making your own client_id ###

When you use rclone with Google drive in its default configuration you
@@ -188,3 +188,10 @@ causes not all domains to be resolved properly.
Additionally with the `GODEBUG=netdns=` environment variable the Go
resolver decision can be influenced. This also allows resolving certain
issues with DNS resolution. See the [name resolution section in the go docs](https://golang.org/pkg/net/#hdr-Name_Resolution).

### The total size reported in the stats for a sync is wrong and keeps changing

It is likely you have more than 10,000 files that need to be
synced. By default rclone only gets 10,000 files ahead in a sync so as
not to use up too much memory. You can change this default with the
[--max-backlog](/docs/#max-backlog-n) flag.
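For example, raising the backlog so the total is calculated over more files up front (the value here is illustrative):

    rclone sync --max-backlog 200000 source:path dest:path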
@@ -217,6 +217,20 @@ the rclone config file, you can set `service_account_credentials` with
the actual contents of the file instead, or set the equivalent
environment variable.

### Application Default Credentials ###

If no other source of credentials is provided, rclone will fall back
to
[Application Default Credentials](https://cloud.google.com/video-intelligence/docs/common/auth#authenticating_with_application_default_credentials).
This is useful both when you have already configured authentication
for your developer account and in production when running on a google
compute host. Note that if running in docker, you may need to run
additional commands on your google compute machine -
[see this page](https://cloud.google.com/container-registry/docs/advanced-authentication#gcloud_as_a_docker_credential_helper).

Note that in the case application default credentials are used, there
is no need to explicitly configure a project number.
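If the `gcloud` CLI is installed, one common way to set up Application Default Credentials on a developer machine is (assuming the standard gcloud tooling; see Google's docs for details):

    gcloud auth application-default login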
### --fast-list ###

This remote supports `--fast-list` which allows you to use fewer

@@ -328,6 +342,27 @@ Access Control List for new buckets.
- "publicReadWrite"
	- Project team owners get OWNER access, and all Users get WRITER access.

#### --gcs-bucket-policy-only

Access checks should use bucket-level IAM policies.

If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.

When it is set, rclone:

- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set

Docs: https://cloud.google.com/storage/docs/bucket-policy-only

- Config: bucket_policy_only
- Env Var: RCLONE_GCS_BUCKET_POLICY_ONLY
- Type: bool
- Default: false

#### --gcs-location

Location for the newly created buckets.
189 docs/content/koofr.md Normal file

@@ -0,0 +1,189 @@
---
title: "Koofr"
description: "Rclone docs for Koofr"
date: "2019-02-25"
---

<i class="fa fa-suitcase"></i> Koofr
-----------------------------------------

Paths are specified as `remote:path`

Paths may be as deep as required, eg `remote:directory/subdirectory`.

The initial setup for Koofr involves creating an application password for
rclone. You can do that by opening the Koofr
[web application](https://app.koofr.net/app/admin/preferences/password),
giving the password a nice name like `rclone` and clicking on generate.

Here is an example of how to make a remote called `koofr`. First run:

     rclone config

This will guide you through an interactive setup process:

```
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> koofr
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
 1 / A stackable unification remote, which can appear to merge the contents of several remotes
   \ "union"
 2 / Alias for a existing remote
   \ "alias"
 3 / Amazon Drive
   \ "amazon cloud drive"
 4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)
   \ "s3"
 5 / Backblaze B2
   \ "b2"
 6 / Box
   \ "box"
 7 / Cache a remote
   \ "cache"
 8 / Dropbox
   \ "dropbox"
 9 / Encrypt/Decrypt a remote
   \ "crypt"
10 / FTP Connection
   \ "ftp"
11 / Google Cloud Storage (this is not Google Drive)
   \ "google cloud storage"
12 / Google Drive
   \ "drive"
13 / Hubic
   \ "hubic"
14 / JottaCloud
   \ "jottacloud"
15 / Koofr
   \ "koofr"
16 / Local Disk
   \ "local"
17 / Mega
   \ "mega"
18 / Microsoft Azure Blob Storage
   \ "azureblob"
19 / Microsoft OneDrive
   \ "onedrive"
20 / OpenDrive
   \ "opendrive"
21 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
   \ "swift"
22 / Pcloud
   \ "pcloud"
23 / QingCloud Object Storage
   \ "qingstor"
24 / SSH/SFTP Connection
   \ "sftp"
25 / Webdav
   \ "webdav"
26 / Yandex Disk
   \ "yandex"
27 / http Connection
   \ "http"
Storage> koofr
** See help for koofr backend at: https://rclone.org/koofr/ **

Your Koofr user name
Enter a string value. Press Enter for the default ("").
user> USER@NAME
Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)
y) Yes type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:
Edit advanced config? (y/n)
y) Yes
n) No
y/n> n
Remote config
--------------------
[koofr]
type = koofr
baseurl = https://app.koofr.net
user = USER@NAME
password = *** ENCRYPTED ***
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```

You can choose to edit advanced config in order to enter your own service URL
if you use an on-premise or white label Koofr instance, or choose an alternative
mount instead of your primary storage.

Once configured you can then use `rclone` like this,

List directories in top level of your Koofr

    rclone lsd koofr:

List all the files in your Koofr

    rclone ls koofr:

To copy a local directory to a Koofr directory called backup

    rclone copy /home/source remote:backup

<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/koofr/koofr.go then run make backenddocs -->
### Standard Options

Here are the standard options specific to koofr (Koofr).

#### --koofr-user

Your Koofr user name

- Config: user
- Env Var: RCLONE_KOOFR_USER
- Type: string
- Default: ""

#### --koofr-password

Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)

- Config: password
- Env Var: RCLONE_KOOFR_PASSWORD
- Type: string
- Default: ""

### Advanced Options

Here are the advanced options specific to koofr (Koofr).

#### --koofr-baseurl

Base URL of the Koofr API to connect to

- Config: baseurl
- Env Var: RCLONE_KOOFR_BASEURL
- Type: string
- Default: "https://app.koofr.net"

#### --koofr-mountid

Mount ID of the mount to use. If omitted, the primary mount is used.

- Config: mountid
- Env Var: RCLONE_KOOFR_MOUNTID
- Type: string
- Default: ""

<!--- autogenerated options stop -->

### Limitations ###

Note that Koofr is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".
@@ -2,7 +2,7 @@
title: "Overview of cloud storage systems"
description: "Overview of cloud storage systems"
type: page
date: "2015-09-06"
date: "2019-02-25"
---

# Overview of cloud storage systems #

@@ -28,6 +28,7 @@ Here is an overview of the major features of each cloud storage system.
| HTTP                         | -       | No  | No  | No  | R   |
| Hubic                        | MD5     | Yes | No  | No  | R/W |
| Jottacloud                   | MD5     | Yes | Yes | No  | R/W |
| Koofr                        | MD5     | No  | Yes | No  | -   |
| Mega                         | -       | No  | No  | Yes | -   |
| Microsoft Azure Blob Storage | MD5     | Yes | No  | No  | R/W |
| Microsoft OneDrive           | SHA1 ‡‡ | Yes | Yes | No  | R   |
@@ -1112,6 +1112,11 @@ server_side_encryption =
storage_class =
```

If you are using an older version of CEPH, eg 10.2.x Jewel, then you
may need to supply the parameter `--s3-upload-cutoff 0` or put this in
the config file as `upload_cutoff 0` to work around a bug which causes
uploading of small files to fail.

Note also that Ceph sometimes puts `/` in the passwords it gives
users. If you read the secret access key using the command line tools
you will get a JSON blob with the `/` escaped as `\/`. Make sure you
@@ -67,6 +67,7 @@
<li><a href="/http/"><i class="fa fa-globe"></i> HTTP</a></li>
<li><a href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a></li>
<li><a href="/jottacloud/"><i class="fa fa-cloud"></i> Jottacloud</a></li>
<li><a href="/koofr/"><i class="fa fa-suitcase"></i> Koofr</a></li>
<li><a href="/mega/"><i class="fa fa-archive"></i> Mega</a></li>
<li><a href="/azureblob/"><i class="fa fa-windows"></i> Microsoft Azure Blob Storage</a></li>
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft OneDrive</a></li>
@@ -201,8 +201,9 @@ func (s *StatsInfo) String() string {
	}
	dtRounded := dt - (dt % (time.Second / 10))

	displaySpeed := speed
	if fs.Config.DataRateUnit == "bits" {
		speed = speed * 8
		displaySpeed *= 8
	}

	var (

@@ -235,7 +236,7 @@ func (s *StatsInfo) String() string {
		fs.SizeSuffix(s.bytes),
		fs.SizeSuffix(totalSize).Unit("Bytes"),
		percent(s.bytes, totalSize),
		fs.SizeSuffix(speed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
		fs.SizeSuffix(displaySpeed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
		etaString(currentSize, totalSize, speed),
		xfrchkString,
	)
@@ -87,6 +87,9 @@ type ConfigInfo struct {
	Progress           bool
	Cookie             bool
	UseMmap            bool
	CaCert             string // Client Side CA
	ClientCert         string // Client Side Cert
	ClientKey          string // Client Side Key
}

// NewConfig creates a new config with everything set to the default
@@ -89,6 +89,9 @@ func AddFlags(flagSet *pflag.FlagSet) {
	flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.")
	flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.")
	flags.BoolVarP(flagSet, &fs.Config.UseMmap, "use-mmap", "", fs.Config.UseMmap, "Use mmap allocator (see docs).")
	flags.StringVarP(flagSet, &fs.Config.CaCert, "ca-cert", "", fs.Config.CaCert, "CA certificate used to verify servers")
	flags.StringVarP(flagSet, &fs.Config.ClientCert, "client-cert", "", fs.Config.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth")
	flags.StringVarP(flagSet, &fs.Config.ClientKey, "client-key", "", fs.Config.ClientKey, "Client SSL private key (PEM) for mutual TLS auth")
}

// SetFlags converts any flags into config which weren't straight forward
@@ -13,9 +13,15 @@ var (
	Opt = filter.DefaultOpt
)

// Reload the filters from the flags
func Reload() (err error) {
	filter.Active, err = filter.NewFilter(&Opt)
	return err
}

// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
	rc.AddOption("filter", &Opt)
	rc.AddOptionReload("filter", &Opt, Reload)
	flags.BoolVarP(flagSet, &Opt.DeleteExcluded, "delete-excluded", "", false, "Delete files on dest excluded from sync")
	flags.StringArrayVarP(flagSet, &Opt.FilterRule, "filter", "f", nil, "Add a file-filtering rule")
	flags.StringArrayVarP(flagSet, &Opt.FilterFrom, "filter-from", "", nil, "Read filtering patterns from a file")
83 fs/fs.go

@@ -16,8 +16,10 @@ import (

	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/fspath"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/pkg/errors"
)

@@ -59,7 +61,7 @@ var (
	ErrorNotAFile            = errors.New("is a not a regular file")
	ErrorNotDeleting         = errors.New("not deleting files as there were IO errors")
	ErrorNotDeletingDirs     = errors.New("not deleting directories as there were IO errors")
	ErrorCantMoveOverlapping = errors.New("can't move files on overlapping remotes")
	ErrorOverlapping         = errors.New("can't sync or move files on overlapping remotes")
	ErrorDirectoryNotEmpty   = errors.New("directory not empty")
	ErrorImmutableModified   = errors.New("immutable file modified")
	ErrorPermissionDenied    = errors.New("permission denied")

@@ -407,6 +409,7 @@ type Features struct {
	BucketBased             bool // is bucket based (like s3, swift etc)
	SetTier                 bool // allows set tier functionality on objects
	GetTier                 bool // allows to retrieve storage tier of objects
	ServerSideAcrossConfigs bool // can server side copy between different remotes of the same type

	// Purge all files in the root and the root directory
	//
@@ -1112,3 +1115,81 @@ func GetModifyWindow(fss ...Info) time.Duration {
	}
	return window
}

// Pacer is a simple wrapper around a pacer.Pacer with logging.
type Pacer struct {
	*pacer.Pacer
}

type logCalculator struct {
	pacer.Calculator
}

// NewPacer creates a Pacer for the given Fs and Calculator.
func NewPacer(c pacer.Calculator) *Pacer {
	p := &Pacer{
		Pacer: pacer.New(
			pacer.InvokerOption(pacerInvoker),
			pacer.MaxConnectionsOption(Config.Checkers+Config.Transfers),
			pacer.RetriesOption(Config.LowLevelRetries),
			pacer.CalculatorOption(c),
		),
	}
	p.SetCalculator(c)
	return p
}

func (d *logCalculator) Calculate(state pacer.State) time.Duration {
	oldSleepTime := state.SleepTime
	newSleepTime := d.Calculator.Calculate(state)
	if state.ConsecutiveRetries > 0 {
		if newSleepTime != oldSleepTime {
			Debugf("pacer", "Rate limited, increasing sleep to %v", newSleepTime)
		}
	} else {
		if newSleepTime != oldSleepTime {
			Debugf("pacer", "Reducing sleep to %v", newSleepTime)
		}
	}
	return newSleepTime
}

// SetCalculator sets the pacing algorithm. Don't modify the Calculator object
// afterwards, use the ModifyCalculator method when needed.
//
// It will choose the default algorithm if nil is passed in.
func (p *Pacer) SetCalculator(c pacer.Calculator) {
	switch c.(type) {
	case *logCalculator:
		Logf("pacer", "Invalid Calculator in fs.Pacer.SetCalculator")
	case nil:
		c = &logCalculator{pacer.NewDefault()}
	default:
		c = &logCalculator{c}
	}

	p.Pacer.SetCalculator(c)
}

// ModifyCalculator calls the given function with the currently configured
// Calculator and the Pacer lock held.
func (p *Pacer) ModifyCalculator(f func(pacer.Calculator)) {
	p.Pacer.ModifyCalculator(func(c pacer.Calculator) {
		switch _c := c.(type) {
		case *logCalculator:
			f(_c.Calculator)
		default:
			Logf("pacer", "Invalid Calculator in fs.Pacer: %t", c)
			f(c)
		}
	})
}

func pacerInvoker(try, retries int, f pacer.Paced) (retry bool, err error) {
	retry, err = f()
	if retry {
		Debugf("pacer", "low level retry %d/%d (error %v)", try, retries, err)
		err = fserrors.RetryError(err)
	}
	return
}
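The yandex hunk near the top of this diff shows the intended call site for the new wrapper. A minimal sketch of a backend constructing and using one of these pacers (the sleep constants are illustrative, not from any particular backend):

```go
package main

import (
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/lib/pacer"
)

const (
	minSleep      = 10 * time.Millisecond // illustrative backend constants
	maxSleep      = 2 * time.Second
	decayConstant = 2
)

func main() {
	// fs.NewPacer wraps the default token-bucket calculator with the
	// rclone-wide retry count and the logging calculator shown above.
	p := fs.NewPacer(pacer.NewDefault(
		pacer.MinSleep(minSleep),
		pacer.MaxSleep(maxSleep),
		pacer.DecayConstant(decayConstant),
	))
	// Call keeps invoking the function, sleeping between attempts,
	// while it returns retry == true (up to --low-level-retries times).
	_ = p.Call(func() (bool, error) {
		return false, nil // a real backend would make an API call here
	})
}
```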
@@ -2,8 +2,15 @@ package fs

import (
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/pkg/errors"
	"github.com/spf13/pflag"
	"github.com/stretchr/testify/assert"
)

@@ -70,3 +77,47 @@ func TestOption(t *testing.T) {
	err = d.Set("sdfsdf")
	assert.Error(t, err)
}

var errFoo = errors.New("foo")

type dummyPaced struct {
	retry  bool
	called int
	wait   *sync.Cond
}

func (dp *dummyPaced) fn() (bool, error) {
	if dp.wait != nil {
		dp.wait.L.Lock()
		dp.wait.Wait()
		dp.wait.L.Unlock()
	}
	dp.called++
	return dp.retry, errFoo
}

func TestPacerCall(t *testing.T) {
	expectedCalled := Config.LowLevelRetries
	if expectedCalled == 0 {
		expectedCalled = 20
		Config.LowLevelRetries = expectedCalled
		defer func() {
			Config.LowLevelRetries = 0
		}()
	}
	p := NewPacer(pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))

	dp := &dummyPaced{retry: true}
	err := p.Call(dp.fn)
	require.Equal(t, expectedCalled, dp.called)
	require.Implements(t, (*fserrors.Retrier)(nil), err)
}

func TestPacerCallNoRetry(t *testing.T) {
	p := NewPacer(pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))

	dp := &dummyPaced{retry: true}
	err := p.CallNoRetry(dp.fn)
	require.Equal(t, 1, dp.called)
	require.Implements(t, (*fserrors.Retrier)(nil), err)
}
@@ -194,7 +194,7 @@ func Cause(cause error) (retriable bool, err error) {
			// this case.
			err = prev
		}
		if err == prev {
		if reflect.DeepEqual(err, prev) {
			// Unpack any struct or *struct with a field
			// of name Err which satisfies the error
			// interface. This includes *url.Error,

@@ -215,7 +215,7 @@ func Cause(cause error) (retriable bool, err error) {
				}
			}
		}
		if err == prev {
		if reflect.DeepEqual(err, prev) {
			break
		}
	}
@@ -5,6 +5,9 @@ import (
	"bytes"
	"context"
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
	"net"
	"net/http"
	"net/http/cookiejar"

@@ -130,7 +133,39 @@ func NewTransport(ci *fs.ConfigInfo) http.RoundTripper {
	t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost
	t.TLSHandshakeTimeout = ci.ConnectTimeout
	t.ResponseHeaderTimeout = ci.Timeout
	t.TLSClientConfig = &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify}

	// TLS Config
	t.TLSClientConfig = &tls.Config{
		InsecureSkipVerify: ci.InsecureSkipVerify,
	}

	// Load client certs
	if ci.ClientCert != "" || ci.ClientKey != "" {
		if ci.ClientCert == "" || ci.ClientKey == "" {
			log.Fatalf("Both --client-cert and --client-key must be set")
		}
		cert, err := tls.LoadX509KeyPair(ci.ClientCert, ci.ClientKey)
		if err != nil {
			log.Fatalf("Failed to load --client-cert/--client-key pair: %v", err)
		}
		t.TLSClientConfig.Certificates = []tls.Certificate{cert}
		t.TLSClientConfig.BuildNameToCertificate()
	}

	// Load CA cert
	if ci.CaCert != "" {
		caCert, err := ioutil.ReadFile(ci.CaCert)
		if err != nil {
			log.Fatalf("Failed to read --ca-cert: %v", err)
		}
		caCertPool := x509.NewCertPool()
		ok := caCertPool.AppendCertsFromPEM(caCert)
		if !ok {
			log.Fatalf("Failed to add certificates from --ca-cert")
		}
		t.TLSClientConfig.RootCAs = caCertPool
	}

	t.DisableCompression = ci.NoGzip
	t.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		return dialContextTimeout(ctx, network, addr, ci)
@@ -10,6 +10,7 @@ import (
	"io/ioutil"
	"net/http"
	"path"
	"path/filepath"
	"sort"
	"strconv"
	"strings"

@@ -272,7 +273,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec
	// Try server side copy first - if has optional interface and
	// is same underlying remote
	actionTaken = "Copied (server side copy)"
	if doCopy := f.Features().Copy; doCopy != nil && SameConfig(src.Fs(), f) {
	if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
		newDst, err = doCopy(src, remote)
		if err == nil {
			dst = newDst

@@ -283,7 +284,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec
	// If can't server side copy, do it manually
	if err == fs.ErrorCantCopy {
		var in0 io.ReadCloser
		in0, err = src.Open(hashOption)
		in0, err = newReOpen(src, hashOption, fs.Config.LowLevelRetries)
		if err != nil {
			err = errors.Wrap(err, "failed to open source object")
		} else {

@@ -391,7 +392,7 @@ func Move(fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Ob
		return newDst, nil
	}
	// See if we have Move available
	if doMove := fdst.Features().Move; doMove != nil && SameConfig(src.Fs(), fdst) {
	if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
		// Delete destination if it exists
		if dst != nil {
			err = DeleteFile(dst)

@@ -524,6 +525,11 @@ func DeleteFiles(toBeDeleted fs.ObjectsChan) error {
	return DeleteFilesWithBackupDir(toBeDeleted, nil)
}

// SameRemoteType returns true if fdst and fsrc are the same type
func SameRemoteType(fdst, fsrc fs.Info) bool {
	return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc)
}

// SameConfig returns true if fdst and fsrc are using the same config
// file entry
func SameConfig(fdst, fsrc fs.Info) bool {

@@ -532,7 +538,7 @@ func SameConfig(fdst, fsrc fs.Info) bool {

// Same returns true if fdst and fsrc point to the same underlying Fs
func Same(fdst, fsrc fs.Info) bool {
	return SameConfig(fdst, fsrc) && fdst.Root() == fsrc.Root()
	return SameConfig(fdst, fsrc) && strings.Trim(fdst.Root(), "/") == strings.Trim(fsrc.Root(), "/")
}

// Overlapping returns true if fdst and fsrc point to the same

@@ -543,7 +549,7 @@ func Overlapping(fdst, fsrc fs.Info) bool {
	}
	// Return the Root with a trailing / if not empty
	fixedRoot := func(f fs.Info) string {
		s := strings.Trim(f.Root(), "/")
		s := strings.Trim(filepath.ToSlash(f.Root()), "/")
		if s != "" {
			s += "/"
		}

@@ -1479,8 +1485,7 @@ type ListFormat struct {
	separator string
	dirSlash  bool
	absolute  bool
	output []func() string
	entry  fs.DirEntry
	output []func(entry *ListJSONItem) string
	csv    *csv.Writer
	buf    bytes.Buffer
}
@@ -1516,76 +1521,91 @@ func (l *ListFormat) SetCSV(useCSV bool) {
}

// SetOutput sets functions used to create files information
func (l *ListFormat) SetOutput(output []func() string) {
func (l *ListFormat) SetOutput(output []func(entry *ListJSONItem) string) {
	l.output = output
}

// AddModTime adds file's Mod Time to output
func (l *ListFormat) AddModTime() {
	l.AppendOutput(func() string { return l.entry.ModTime().Local().Format("2006-01-02 15:04:05") })
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.ModTime.When.Local().Format("2006-01-02 15:04:05")
	})
}

// AddSize adds file's size to output
func (l *ListFormat) AddSize() {
	l.AppendOutput(func() string {
		return strconv.FormatInt(l.entry.Size(), 10)
	l.AppendOutput(func(entry *ListJSONItem) string {
		return strconv.FormatInt(entry.Size, 10)
	})
}

// normalisePath makes sure the path has the correct slashes for the current mode
func (l *ListFormat) normalisePath(entry *ListJSONItem, remote string) string {
	if l.absolute && !strings.HasPrefix(remote, "/") {
		remote = "/" + remote
	}
	if entry.IsDir && l.dirSlash {
		remote += "/"
	}
	return remote
}

// AddPath adds path to file to output
func (l *ListFormat) AddPath() {
	l.AppendOutput(func() string {
		remote := l.entry.Remote()
		if l.absolute && !strings.HasPrefix(remote, "/") {
			remote = "/" + remote
		}
		_, isDir := l.entry.(fs.Directory)
		if isDir && l.dirSlash {
			remote += "/"
		}
		return remote
	l.AppendOutput(func(entry *ListJSONItem) string {
		return l.normalisePath(entry, entry.Path)
	})
}

// AddEncrypted adds the encrypted path to file to output
func (l *ListFormat) AddEncrypted() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return l.normalisePath(entry, entry.Encrypted)
	})
}

// AddHash adds the hash of the type given to the output
func (l *ListFormat) AddHash(ht hash.Type) {
	l.AppendOutput(func() string {
		o, ok := l.entry.(fs.Object)
		if !ok {
	hashName := ht.String()
	l.AppendOutput(func(entry *ListJSONItem) string {
		if entry.IsDir {
			return ""
		}
		return hashSum(ht, o)
		return entry.Hashes[hashName]
	})
}

// AddID adds file's ID to the output if known
func (l *ListFormat) AddID() {
	l.AppendOutput(func() string {
		if do, ok := l.entry.(fs.IDer); ok {
			return do.ID()
		}
		return ""
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.ID
	})
}

// AddOrigID adds file's Original ID to the output if known
func (l *ListFormat) AddOrigID() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.OrigID
	})
}

// AddMimeType adds file's MimeType to the output if known
func (l *ListFormat) AddMimeType() {
	l.AppendOutput(func() string {
		return fs.MimeTypeDirEntry(l.entry)
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.MimeType
	})
}

// AppendOutput adds string generated by specific function to printed output
func (l *ListFormat) AppendOutput(functionToAppend func() string) {
func (l *ListFormat) AppendOutput(functionToAppend func(item *ListJSONItem) string) {
	l.output = append(l.output, functionToAppend)
}

// Format prints information about the DirEntry in the format defined
func (l *ListFormat) Format(entry fs.DirEntry) (result string) {
	l.entry = entry
func (l *ListFormat) Format(entry *ListJSONItem) (result string) {
	var out []string
	for _, fun := range l.output {
		out = append(out, fun())
		out = append(out, fun(entry))
	}
	if l.csv != nil {
		l.buf.Reset()
@@ -39,7 +39,6 @@ import (
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/filter"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/list"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -778,6 +777,7 @@ func TestSame(t *testing.T) {

func TestOverlapping(t *testing.T) {
	a := &testFsInfo{name: "name", root: "root"}
+	slash := string(os.PathSeparator) // native path separator
	for _, test := range []struct {
		name string
		root string
@@ -790,6 +790,8 @@ func TestOverlapping(t *testing.T) {
		{"name", "roo", false},
		{"name", "root/toot", true},
		{"name", "root/toot/", true},
+		{"name", "root" + slash + "toot", true},
+		{"name", "root" + slash + "toot" + slash, true},
		{"name", "", true},
		{"name", "/", true},
	} {
@@ -873,61 +875,90 @@ func TestCheckEqualReaders(t *testing.T) {
}

func TestListFormat(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	file1 := r.WriteObject("a", "a", t1)
-	file2 := r.WriteObject("subdir/b", "b", t1)
+	item0 := &operations.ListJSONItem{
+		Path:      "a",
+		Name:      "a",
+		Encrypted: "encryptedFileName",
+		Size:      1,
+		MimeType:  "application/octet-stream",
+		ModTime: operations.Timestamp{
+			When:   t1,
+			Format: "2006-01-02T15:04:05.000000000Z07:00"},
+		IsDir: false,
+		Hashes: map[string]string{
+			"MD5":          "0cc175b9c0f1b6a831c399e269772661",
+			"SHA-1":        "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8",
+			"DropboxHash":  "bf5d3affb73efd2ec6c36ad3112dd933efed63c4e1cbffcfa88e2759c144f2d8",
+			"QuickXorHash": "6100000000000000000000000100000000000000"},
+		ID:     "fileID",
+		OrigID: "fileOrigID",
+	}

-	fstest.CheckItems(t, r.Fremote, file1, file2)
+	item1 := &operations.ListJSONItem{
+		Path:      "subdir",
+		Name:      "subdir",
+		Encrypted: "encryptedDirName",
+		Size:      -1,
+		MimeType:  "inode/directory",
+		ModTime: operations.Timestamp{
+			When:   t2,
+			Format: "2006-01-02T15:04:05.000000000Z07:00"},
+		IsDir:  true,
+		Hashes: map[string]string(nil),
+		ID:     "dirID",
+		OrigID: "dirOrigID",
+	}

-	items, _ := list.DirSorted(r.Fremote, true, "")
	var list operations.ListFormat
	list.AddPath()
	list.SetDirSlash(false)
-	assert.Equal(t, "subdir", list.Format(items[1]))
+	assert.Equal(t, "subdir", list.Format(item1))

	list.SetDirSlash(true)
-	assert.Equal(t, "subdir/", list.Format(items[1]))
+	assert.Equal(t, "subdir/", list.Format(item1))

	list.SetOutput(nil)
-	assert.Equal(t, "", list.Format(items[1]))
+	assert.Equal(t, "", list.Format(item1))

-	list.AppendOutput(func() string { return "a" })
-	list.AppendOutput(func() string { return "b" })
-	assert.Equal(t, "ab", list.Format(items[1]))
+	list.AppendOutput(func(item *operations.ListJSONItem) string { return "a" })
+	list.AppendOutput(func(item *operations.ListJSONItem) string { return "b" })
+	assert.Equal(t, "ab", list.Format(item1))
	list.SetSeparator(":::")
-	assert.Equal(t, "a:::b", list.Format(items[1]))
+	assert.Equal(t, "a:::b", list.Format(item1))

	list.SetOutput(nil)
	list.AddModTime()
-	assert.Equal(t, items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0]))
+	assert.Equal(t, t1.Local().Format("2006-01-02 15:04:05"), list.Format(item0))

	list.SetOutput(nil)
	list.SetSeparator("|")
	list.AddID()
-	_ = list.Format(items[0]) // Can't really check anything - at least it didn't panic!
+	list.AddOrigID()
+	assert.Equal(t, "fileID|fileOrigID", list.Format(item0))
+	assert.Equal(t, "dirID|dirOrigID", list.Format(item1))

	list.SetOutput(nil)
	list.AddMimeType()
-	assert.Contains(t, list.Format(items[0]), "/")
-	assert.Equal(t, "inode/directory", list.Format(items[1]))
+	assert.Contains(t, list.Format(item0), "/")
+	assert.Equal(t, "inode/directory", list.Format(item1))

	list.SetOutput(nil)
	list.AddPath()
	list.SetAbsolute(true)
-	assert.Equal(t, "/a", list.Format(items[0]))
+	assert.Equal(t, "/a", list.Format(item0))
	list.SetAbsolute(false)
-	assert.Equal(t, "a", list.Format(items[0]))
+	assert.Equal(t, "a", list.Format(item0))

	list.SetOutput(nil)
	list.AddSize()
-	assert.Equal(t, "1", list.Format(items[0]))
+	assert.Equal(t, "1", list.Format(item0))

	list.AddPath()
	list.AddModTime()
	list.SetDirSlash(true)
	list.SetSeparator("__SEP__")
-	assert.Equal(t, "1__SEP__a__SEP__"+items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0]))
-	assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"__SEP__subdir/__SEP__"+items[1].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[1]))
+	assert.Equal(t, "1__SEP__a__SEP__"+t1.Local().Format("2006-01-02 15:04:05"), list.Format(item0))
+	assert.Equal(t, "-1__SEP__subdir/__SEP__"+t2.Local().Format("2006-01-02 15:04:05"), list.Format(item1))

	for _, test := range []struct {
		ht hash.Type
@@ -939,10 +970,7 @@ func TestListFormat(t *testing.T) {
	} {
		list.SetOutput(nil)
		list.AddHash(test.ht)
-		got := list.Format(items[0])
-		if got != "UNSUPPORTED" && got != "" {
-			assert.Equal(t, test.want, got)
-		}
+		assert.Equal(t, test.want, list.Format(item0))
	}

	list.SetOutput(nil)
@@ -952,8 +980,15 @@ func TestListFormat(t *testing.T) {
	list.AddPath()
	list.AddModTime()
	list.SetDirSlash(true)
-	assert.Equal(t, "1|a|"+items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0]))
-	assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"|subdir/|"+items[1].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[1]))
+	assert.Equal(t, "1|a|"+t1.Local().Format("2006-01-02 15:04:05"), list.Format(item0))
+	assert.Equal(t, "-1|subdir/|"+t2.Local().Format("2006-01-02 15:04:05"), list.Format(item1))
+
+	list.SetOutput(nil)
+	list.SetSeparator("|")
+	list.AddPath()
+	list.AddEncrypted()
+	assert.Equal(t, "a|encryptedFileName", list.Format(item0))
+	assert.Equal(t, "subdir/|encryptedDirName/", list.Format(item1))

}

fs/operations/reopen.go (new file, 111 lines)
@@ -0,0 +1,111 @@
package operations

import (
	"io"
	"sync"

	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
)

// reOpen is a wrapper for an object reader which reopens the stream on error
type reOpen struct {
	mu         sync.Mutex       // mutex to protect the below
	src        fs.Object        // object to open
	hashOption *fs.HashesOption // option to pass to initial open
	rc         io.ReadCloser    // underlying stream
	read       int64            // number of bytes read from this stream
	maxTries   int              // maximum number of retries
	tries      int              // number of retries we've had so far in this stream
	err        error            // if this is set then Read/Close calls will return it
	opened     bool             // if set then rc is valid and needs closing
}

var (
	errorFileClosed   = errors.New("file already closed")
	errorTooManyTries = errors.New("failed to reopen: too many retries")
)

// newReOpen makes a handle which will reopen itself and seek to where it was on errors
func newReOpen(src fs.Object, hashOption *fs.HashesOption, maxTries int) (rc io.ReadCloser, err error) {
	h := &reOpen{
		src:        src,
		hashOption: hashOption,
		maxTries:   maxTries,
	}
	h.mu.Lock()
	defer h.mu.Unlock()
	err = h.open()
	if err != nil {
		return nil, err
	}
	return h, nil
}

// open the underlying handle - call with lock held
//
// we don't retry here as the Open() call will itself have low level retries
func (h *reOpen) open() error {
	var opts = make([]fs.OpenOption, 1)
	if h.read == 0 {
		// put hashOption on if reading from the start, ditch otherwise
		opts[0] = h.hashOption
	} else {
		// seek to the read point
		opts[0] = &fs.SeekOption{Offset: h.read}
	}
	h.tries++
	if h.tries > h.maxTries {
		h.err = errorTooManyTries
	} else {
		h.rc, h.err = h.src.Open(opts...)
	}
	if h.err != nil {
		if h.tries > 1 {
			fs.Debugf(h.src, "Reopen failed after %d bytes read: %v", h.read, h.err)
		}
		return h.err
	}
	h.opened = true
	return nil
}

// Read bytes retrying as necessary
func (h *reOpen) Read(p []byte) (n int, err error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.err != nil {
		// return a previous error if there is one
		return n, h.err
	}
	n, err = h.rc.Read(p)
	if err != nil {
		h.err = err
	}
	h.read += int64(n)
	if err != nil && err != io.EOF {
		// close underlying stream
		h.opened = false
		_ = h.rc.Close()
		// reopen stream, clearing error if successful
		fs.Debugf(h.src, "Reopening on read failure after %d bytes: retry %d/%d: %v", h.read, h.tries, h.maxTries, err)
		if h.open() == nil {
			err = nil
		}
	}
	return n, err
}

// Close the stream
func (h *reOpen) Close() error {
	h.mu.Lock()
	defer h.mu.Unlock()
	if !h.opened {
		return errorFileClosed
	}
	h.opened = false
	h.err = errorFileClosed
	return h.rc.Close()
}
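The intended consumer is the download path in operations: wrapping the source object means a transient read error mid-transfer triggers a reopen with a SeekOption at the current offset instead of failing the whole copy. A sketch of such a call site (hypothetical; the real Copy integration is not shown in this diff, and using fs.Config.LowLevelRetries as the retry budget is an assumption):

	// Hypothetical call site for newReOpen, inside the operations package.
	in, err := newReOpen(src, hashOption, fs.Config.LowLevelRetries)
	if err != nil {
		return err
	}
	defer fs.CheckClose(in, &err)
	// Reads through `in` now survive up to maxTries mid-stream
	// failures; reOpen reopens src at the last read offset each time.
	_, err = io.Copy(dst, in)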
fs/operations/reopen_test.go (new file, 144 lines)
@@ -0,0 +1,144 @@
package operations

import (
	"io"
	"io/ioutil"
	"testing"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fstest/mockobject"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
)

// check interface
var _ io.ReadCloser = (*reOpen)(nil)

var errorTestError = errors.New("test error")

// this is a wrapper for a mockobject with a custom Open function
//
// breaks indicates the number of bytes to read before returning an
// error
type reOpenTestObject struct {
	fs.Object
	breaks []int64
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
//
// This will break after reading the number of bytes in breaks
func (o *reOpenTestObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
	rc, err := o.Object.Open(options...)
	if err != nil {
		return nil, err
	}
	if len(o.breaks) > 0 {
		// Pop a breakpoint off
		N := o.breaks[0]
		o.breaks = o.breaks[1:]
		// If 0 then return an error immediately
		if N == 0 {
			return nil, errorTestError
		}
		// Read N bytes then an error
		r := io.MultiReader(&io.LimitedReader{R: rc, N: N}, errorReader{errorTestError})
		// Wrap with Close in a new readCloser
		rc = readCloser{Reader: r, Closer: rc}
	}
	return rc, nil
}

// Return an error only
type errorReader struct {
	err error
}

// Read returning an error
func (er errorReader) Read(p []byte) (n int, err error) {
	return 0, er.err
}

// Contents for the mock object
var reOpenTestcontents = []byte("0123456789")

// Start the test with the given breaks
func testReOpen(breaks []int64, maxRetries int) (io.ReadCloser, error) {
	srcOrig := mockobject.New("potato").WithContent(reOpenTestcontents, mockobject.SeekModeRegular)
	src := &reOpenTestObject{
		Object: srcOrig,
		breaks: breaks,
	}
	hashOption := &fs.HashesOption{Hashes: hash.NewHashSet(hash.MD5)}
	return newReOpen(src, hashOption, maxRetries)
}

func TestReOpenBasics(t *testing.T) {
	// open
	h, err := testReOpen(nil, 10)
	assert.NoError(t, err)

	// Check contents read correctly
	got, err := ioutil.ReadAll(h)
	assert.NoError(t, err)
	assert.Equal(t, reOpenTestcontents, got)

	// Check read after end
	var buf = make([]byte, 1)
	n, err := h.Read(buf)
	assert.Equal(t, 0, n)
	assert.Equal(t, io.EOF, err)

	// Check close
	assert.NoError(t, h.Close())

	// Check double close
	assert.Equal(t, errorFileClosed, h.Close())

	// Check read after close
	n, err = h.Read(buf)
	assert.Equal(t, 0, n)
	assert.Equal(t, errorFileClosed, err)
}

func TestReOpenErrorAtStart(t *testing.T) {
	// open with immediate breaking
	h, err := testReOpen([]int64{0}, 10)
	assert.Equal(t, errorTestError, err)
	assert.Nil(t, h)
}

func TestReOpenError(t *testing.T) {
	// open with a few break points but less than the max
	h, err := testReOpen([]int64{2, 1, 3}, 10)
	assert.NoError(t, err)

	// check contents
	got, err := ioutil.ReadAll(h)
	assert.NoError(t, err)
	assert.Equal(t, reOpenTestcontents, got)

	// check close
	assert.NoError(t, h.Close())
}

func TestReOpenFail(t *testing.T) {
	// open with a few break points but >= the max
	h, err := testReOpen([]int64{2, 1, 3}, 3)
	assert.NoError(t, err)

	// check contents
	got, err := ioutil.ReadAll(h)
	assert.Equal(t, errorTestError, err)
	assert.Equal(t, reOpenTestcontents[:6], got)

	// check old error is returned
	var buf = make([]byte, 1)
	n, err := h.Read(buf)
	assert.Equal(t, 0, n)
	assert.Equal(t, errorTooManyTries, err)

	// Check close
	assert.Equal(t, errorFileClosed, h.Close())
}
@@ -8,13 +8,23 @@ import (
	"github.com/pkg/errors"
)

-var optionBlock = map[string]interface{}{}
+var (
+	optionBlock  = map[string]interface{}{}
+	optionReload = map[string]func() error{}
+)

// AddOption adds an option set
func AddOption(name string, option interface{}) {
	optionBlock[name] = option
}

+// AddOptionReload adds an option set with a reload function to be
+// called when options are changed
+func AddOptionReload(name string, option interface{}, reload func() error) {
+	optionBlock[name] = option
+	optionReload[name] = reload
+}
+
func init() {
	Add(Call{
		Path: "options/blocks",
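Consumers register the reload hook alongside their option block; a sketch of a call site (the package name and option struct here are invented for illustration, not taken from this diff):

package myfeature // hypothetical consumer package

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/rc"
)

var opt = struct {
	Enabled bool
}{}

func init() {
	// The reload hook runs after "options/set" rewrites this block,
	// giving the package a chance to re-apply derived state.
	rc.AddOptionReload("myfeature", &opt, func() error {
		fs.Debugf(nil, "myfeature options reloaded: %+v", opt)
		return nil
	})
}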
@@ -103,7 +113,12 @@ func rcOptionsSet(in Params) (out Params, err error) {
		if err != nil {
			return nil, errors.Wrapf(err, "failed to write options from block %q", name)
		}
+		if reload := optionReload[name]; reload != nil {
+			err = reload()
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to reload options from block %q", name)
+			}
+		}
	}
	return out, nil
}

@@ -1,8 +1,10 @@
package rc

import (
+	"fmt"
	"testing"

+	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@@ -24,9 +26,21 @@ func TestAddOption(t *testing.T) {
	assert.Equal(t, len(optionBlock), 0)
	AddOption("potato", &testOptions)
	assert.Equal(t, len(optionBlock), 1)
+	assert.Equal(t, len(optionReload), 0)
	assert.Equal(t, &testOptions, optionBlock["potato"])
}

+func TestAddOptionReload(t *testing.T) {
+	defer clearOptionBlock()
+	assert.Equal(t, len(optionBlock), 0)
+	reload := func() error { return nil }
+	AddOptionReload("potato", &testOptions, reload)
+	assert.Equal(t, len(optionBlock), 1)
+	assert.Equal(t, len(optionReload), 1)
+	assert.Equal(t, &testOptions, optionBlock["potato"])
+	assert.Equal(t, fmt.Sprintf("%p", reload), fmt.Sprintf("%p", optionReload["potato"]))
+}
+
func TestOptionsBlocks(t *testing.T) {
	defer clearOptionBlock()
	AddOption("potato", &testOptions)
@@ -53,7 +67,14 @@ func TestOptionsGet(t *testing.T) {

func TestOptionsSet(t *testing.T) {
	defer clearOptionBlock()
-	AddOption("potato", &testOptions)
+	var reloaded int
+	AddOptionReload("potato", &testOptions, func() error {
+		if reloaded > 0 {
+			return errors.New("error while reloading")
+		}
+		reloaded++
+		return nil
+	})
	call := Calls.Get("options/set")
	require.NotNil(t, call)

@@ -67,6 +88,12 @@ func TestOptionsSet(t *testing.T) {
	require.Nil(t, out)
	assert.Equal(t, 50, testOptions.Int)
	assert.Equal(t, "hello", testOptions.String)
+	assert.Equal(t, 1, reloaded)
+
+	// error from reload
+	_, err = call.Fn(in)
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), "error while reloading")

	// unknown option block
	in = Params{
@@ -85,4 +112,5 @@ func TestOptionsSet(t *testing.T) {
	_, err = call.Fn(in)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "failed to write options")
+
}

@@ -39,17 +39,21 @@ func rcSyncCopyMove(in rc.Params, name string) (out rc.Params, err error) {
	if err != nil {
		return nil, err
	}
+	createEmptySrcDirs, err := in.GetBool("createEmptySrcDirs")
+	if rc.NotErrParamNotFound(err) {
+		return nil, err
+	}
	switch name {
	case "sync":
-		return nil, Sync(dstFs, srcFs)
+		return nil, Sync(dstFs, srcFs, createEmptySrcDirs)
	case "copy":
-		return nil, CopyDir(dstFs, srcFs)
+		return nil, CopyDir(dstFs, srcFs, createEmptySrcDirs)
	case "move":
		deleteEmptySrcDirs, err := in.GetBool("deleteEmptySrcDirs")
		if rc.NotErrParamNotFound(err) {
			return nil, err
		}
-		return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs)
+		return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs, createEmptySrcDirs)
	}
	panic("unknown rcSyncCopyMove type")
}

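Over the remote control API the new flag is just another optional boolean parameter. A sketch of the params rcSyncCopyMove would receive (the srcFs/dstFs keys follow the existing sync/copy/move rc calls; the values are placeholders):

	// Sketch only - placeholder values.
	in := rc.Params{
		"srcFs":              "/home/user/src",
		"dstFs":              "remote:dst",
		"createEmptySrcDirs": true, // optional; when absent, GetBool returns
		// a "param not found" error, which NotErrParamNotFound lets through,
		// so the flag defaults to false
	}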
@@ -24,6 +24,7 @@ type syncCopyMove struct {
	fsrc               fs.Fs
	deleteMode         fs.DeleteMode // how we are doing deletions
	DoMove             bool
+	copyEmptySrcDirs   bool
	deleteEmptySrcDirs bool
	dir                string
	// internal state
@@ -63,12 +64,16 @@ type syncCopyMove struct {
	suffix string // suffix to add to files placed in backupDir
}

-func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) (*syncCopyMove, error) {
+func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) {
+	if (deleteMode != fs.DeleteModeOff || DoMove) && operations.Overlapping(fdst, fsrc) {
+		return nil, fserrors.FatalError(fs.ErrorOverlapping)
+	}
	s := &syncCopyMove{
		fdst:               fdst,
		fsrc:               fsrc,
		deleteMode:         deleteMode,
		DoMove:             DoMove,
+		copyEmptySrcDirs:   copyEmptySrcDirs,
		deleteEmptySrcDirs: deleteEmptySrcDirs,
		dir:                "",
		srcFilesChan:       make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers),
@@ -686,7 +691,9 @@ func (s *syncCopyMove) run() error {
	s.stopTransfers()
	s.stopDeleters()

-	s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs))
+	if s.copyEmptySrcDirs {
+		s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs))
+	}

	// Delete files after
	if s.deleteMode == fs.DeleteModeAfter {
@@ -849,7 +856,7 @@ func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
// If DoMove is true then files will be moved instead of copied
//
// dir is the start directory, "" for root
-func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) error {
+func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
	if deleteMode != fs.DeleteModeOff && DoMove {
		return fserrors.FatalError(errors.New("can't delete and move at the same time"))
	}
@@ -859,7 +866,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
		return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
	}
	// only delete stuff in this pass
-	do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs)
+	do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs)
	if err != nil {
		return err
	}
@@ -870,7 +877,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
		// Next pass does a copy only
		deleteMode = fs.DeleteModeOff
	}
-	do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs)
+	do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs)
	if err != nil {
		return err
	}
@@ -878,22 +885,22 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
}

// Sync fsrc into fdst
-func Sync(fdst, fsrc fs.Fs) error {
-	return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false)
+func Sync(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
+	return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false, copyEmptySrcDirs)
}

// CopyDir copies fsrc into fdst
-func CopyDir(fdst, fsrc fs.Fs) error {
-	return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false)
+func CopyDir(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
+	return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs)
}

// moveDir moves fsrc into fdst
-func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
-	return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs)
+func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
+	return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs)
}

// MoveDir moves fsrc into fdst
-func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
+func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
	if operations.Same(fdst, fsrc) {
		fs.Errorf(fdst, "Nothing to do as source and destination are the same")
		return nil
@@ -920,13 +927,6 @@ func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
		}
	}

-	// The two remotes mustn't overlap if we didn't do server side move
-	if operations.Overlapping(fdst, fsrc) {
-		err := fs.ErrorCantMoveOverlapping
-		fs.Errorf(fdst, "%v", err)
-		return err
-	}
-
	// Otherwise move the files one by one
-	return moveDir(fdst, fsrc, deleteEmptySrcDirs)
+	return moveDir(fdst, fsrc, deleteEmptySrcDirs, copyEmptySrcDirs)
}

|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/filter"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
@@ -39,7 +40,7 @@ func TestCopyWithDryRun(t *testing.T) {
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
fs.Config.DryRun = true
|
||||
err := CopyDir(r.Fremote, r.Flocal)
|
||||
err := CopyDir(r.Fremote, r.Flocal, false)
|
||||
fs.Config.DryRun = false
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -54,7 +55,7 @@ func TestCopy(t *testing.T) {
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
err := CopyDir(r.Fremote, r.Flocal)
|
||||
err := CopyDir(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -71,7 +72,7 @@ func TestCopyNoTraverse(t *testing.T) {
|
||||
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
err := CopyDir(r.Fremote, r.Flocal)
|
||||
err := CopyDir(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -89,7 +90,7 @@ func TestSyncNoTraverse(t *testing.T) {
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -107,7 +108,7 @@ func TestCopyWithDepth(t *testing.T) {
|
||||
fs.Config.MaxDepth = 1
|
||||
defer func() { fs.Config.MaxDepth = -1 }()
|
||||
|
||||
err := CopyDir(r.Fremote, r.Flocal)
|
||||
err := CopyDir(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2)
|
||||
@@ -135,7 +136,7 @@ func TestCopyWithFilesFrom(t *testing.T) {
|
||||
}
|
||||
defer unpatch()
|
||||
|
||||
err = CopyDir(r.Fremote, r.Flocal)
|
||||
err = CopyDir(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
unpatch()
|
||||
|
||||
@@ -152,7 +153,59 @@ func TestCopyEmptyDirectories(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
err = CopyDir(r.Fremote, r.Flocal)
|
||||
err = CopyDir(r.Fremote, r.Flocal, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
t,
|
||||
r.Fremote,
|
||||
[]fstest.Item{
|
||||
file1,
|
||||
},
|
||||
[]string{
|
||||
"sub dir",
|
||||
"sub dir2",
|
||||
},
|
||||
fs.GetModifyWindow(r.Fremote),
|
||||
)
|
||||
}
|
||||
|
||||
// Test move empty directories
|
||||
func TestMoveEmptyDirectories(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
err := operations.Mkdir(r.Flocal, "sub dir2")
|
||||
require.NoError(t, err)
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
err = MoveDir(r.Fremote, r.Flocal, false, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
t,
|
||||
r.Fremote,
|
||||
[]fstest.Item{
|
||||
file1,
|
||||
},
|
||||
[]string{
|
||||
"sub dir",
|
||||
"sub dir2",
|
||||
},
|
||||
fs.GetModifyWindow(r.Fremote),
|
||||
)
|
||||
}
|
||||
|
||||
// Test sync empty directories
|
||||
func TestSyncEmptyDirectories(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
err := operations.Mkdir(r.Flocal, "sub dir2")
|
||||
require.NoError(t, err)
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
err = Sync(r.Fremote, r.Flocal, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
@@ -181,7 +234,7 @@ func TestServerSideCopy(t *testing.T) {
|
||||
defer finaliseCopy()
|
||||
t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy)
|
||||
|
||||
err = CopyDir(FremoteCopy, r.Fremote)
|
||||
err = CopyDir(FremoteCopy, r.Fremote, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, FremoteCopy, file1)
|
||||
@@ -199,7 +252,7 @@ func TestCopyAfterDelete(t *testing.T) {
|
||||
err := operations.Mkdir(r.Flocal, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = CopyDir(r.Fremote, r.Flocal)
|
||||
err = CopyDir(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal)
|
||||
@@ -213,7 +266,7 @@ func TestCopyRedownload(t *testing.T) {
|
||||
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
err := CopyDir(r.Flocal, r.Fremote)
|
||||
err := CopyDir(r.Flocal, r.Fremote, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test with combined precision of local and remote as we copied it there and back
|
||||
@@ -233,7 +286,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file.
|
||||
@@ -245,7 +298,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred no files
|
||||
@@ -267,7 +320,7 @@ func TestSyncSizeOnly(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file.
|
||||
@@ -279,7 +332,7 @@ func TestSyncSizeOnly(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred no files
|
||||
@@ -301,7 +354,7 @@ func TestSyncIgnoreSize(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file.
|
||||
@@ -313,7 +366,7 @@ func TestSyncIgnoreSize(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred no files
|
||||
@@ -329,7 +382,7 @@ func TestSyncIgnoreTimes(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly 0 files because the
|
||||
@@ -340,7 +393,7 @@ func TestSyncIgnoreTimes(t *testing.T) {
|
||||
defer func() { fs.Config.IgnoreTimes = false }()
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file even though the
|
||||
@@ -360,7 +413,7 @@ func TestSyncIgnoreExisting(t *testing.T) {
|
||||
defer func() { fs.Config.IgnoreExisting = false }()
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
@@ -368,7 +421,7 @@ func TestSyncIgnoreExisting(t *testing.T) {
|
||||
// Change everything
|
||||
r.WriteFile("existing", "newpotatoes", t2)
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
// Items should not change
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
@@ -416,7 +469,7 @@ func TestSyncIgnoreErrors(t *testing.T) {
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
fs.CountError(nil)
|
||||
assert.NoError(t, Sync(r.Fremote, r.Flocal))
|
||||
assert.NoError(t, Sync(r.Fremote, r.Flocal, false))
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
t,
|
||||
@@ -459,7 +512,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
|
||||
defer func() { fs.Config.DryRun = false }()
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -468,7 +521,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
|
||||
fs.Config.DryRun = false
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -496,7 +549,7 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Fremote, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -517,7 +570,7 @@ func TestSyncDoesntUpdateModtime(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Fremote, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -537,7 +590,7 @@ func TestSyncAfterAddingAFile(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2)
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
@@ -552,7 +605,7 @@ func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
fstest.CheckItems(t, r.Fremote, file2)
|
||||
@@ -575,7 +628,7 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
fstest.CheckItems(t, r.Fremote, file2)
|
||||
@@ -591,7 +644,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
|
||||
|
||||
fs.Config.DryRun = true
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
fs.Config.DryRun = false
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -610,7 +663,7 @@ func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file1, file3)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file1, file3)
|
||||
fstest.CheckItems(t, r.Fremote, file1, file3)
|
||||
@@ -656,7 +709,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDir(t *testing.T) {
|
||||
)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
@@ -726,7 +779,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
fs.CountError(nil)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
assert.Equal(t, fs.ErrorNotDeleting, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
@@ -803,7 +856,7 @@ func TestCopyDeleteBefore(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := CopyDir(r.Fremote, r.Flocal)
|
||||
err := CopyDir(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
@@ -826,14 +879,14 @@ func TestSyncWithExclude(t *testing.T) {
|
||||
}()
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Fremote, file2, file1)
|
||||
|
||||
// Now sync the other way round and check enormous doesn't get
|
||||
// deleted as it is excluded from the sync
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Flocal, r.Fremote)
|
||||
err = Sync(r.Flocal, r.Fremote, false)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file2, file1, file3)
|
||||
}
|
||||
@@ -856,14 +909,14 @@ func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
|
||||
}()
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Fremote, file2)
|
||||
|
||||
// Check sync the other way round to make sure enormous gets
|
||||
// deleted even though it is excluded
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Flocal, r.Fremote)
|
||||
err = Sync(r.Flocal, r.Fremote, false)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
}
|
||||
@@ -898,7 +951,7 @@ func TestSyncWithUpdateOlder(t *testing.T) {
|
||||
}()
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Fremote, oneO, twoF, threeO, fourF, fiveF)
|
||||
}
|
||||
@@ -922,7 +975,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
|
||||
f2 := r.WriteFile("yam", "Yam Content", t2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
require.NoError(t, Sync(r.Fremote, r.Flocal))
|
||||
require.NoError(t, Sync(r.Fremote, r.Flocal, false))
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, f1, f2)
|
||||
fstest.CheckItems(t, r.Flocal, f1, f2)
|
||||
@@ -931,7 +984,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
|
||||
f2 = r.RenameFile(f2, "yaml")
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
require.NoError(t, Sync(r.Fremote, r.Flocal))
|
||||
require.NoError(t, Sync(r.Fremote, r.Flocal, false))
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, f1, f2)
|
||||
|
||||
@@ -968,7 +1021,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
|
||||
|
||||
// Do server side move
|
||||
accounting.Stats.ResetCounters()
|
||||
err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs)
|
||||
err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
if withFilter {
|
||||
@@ -995,7 +1048,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
|
||||
|
||||
// Move it back to a new empty remote, dst does not exist this time
|
||||
accounting.Stats.ResetCounters()
|
||||
err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs)
|
||||
err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
if withFilter {
|
||||
@@ -1020,7 +1073,7 @@ func TestMoveWithDeleteEmptySrcDirs(t *testing.T) {
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
// run move with --delete-empty-src-dirs
|
||||
err := MoveDir(r.Fremote, r.Flocal, true)
|
||||
err := MoveDir(r.Fremote, r.Flocal, true, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
@@ -1040,7 +1093,7 @@ func TestMoveWithoutDeleteEmptySrcDirs(t *testing.T) {
|
||||
file2 := r.WriteFile("nested/sub dir/file", "nested", t1)
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
err := MoveDir(r.Fremote, r.Flocal, false)
|
||||
err := MoveDir(r.Fremote, r.Flocal, false, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
@@ -1101,16 +1154,37 @@ func TestServerSideMoveOverlap(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
// Subdir move with no filters should return ErrorCantMoveOverlapping
|
||||
err = MoveDir(FremoteMove, r.Fremote, false)
|
||||
assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())
|
||||
err = MoveDir(FremoteMove, r.Fremote, false, false)
|
||||
assert.EqualError(t, err, fs.ErrorOverlapping.Error())
|
||||
|
||||
// Now try with a filter which should also fail with ErrorCantMoveOverlapping
|
||||
filter.Active.Opt.MinSize = 40
|
||||
defer func() {
|
||||
filter.Active.Opt.MinSize = -1
|
||||
}()
|
||||
err = MoveDir(FremoteMove, r.Fremote, false)
|
||||
assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())
|
||||
err = MoveDir(FremoteMove, r.Fremote, false, false)
|
||||
assert.EqualError(t, err, fs.ErrorOverlapping.Error())
|
||||
}
|
||||
|
||||
// Test a sync with overlap
|
||||
func TestSyncOverlap(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
subRemoteName := r.FremoteName + "/rclone-sync-test"
|
||||
FremoteSync, err := fs.NewFs(subRemoteName)
|
||||
require.NoError(t, err)
|
||||
|
||||
checkErr := func(err error) {
|
||||
require.Error(t, err)
|
||||
assert.True(t, fserrors.IsFatalError(err))
|
||||
assert.Equal(t, fs.ErrorOverlapping.Error(), err.Error())
|
||||
}
|
||||
|
||||
checkErr(Sync(FremoteSync, r.Fremote, false))
|
||||
checkErr(Sync(r.Fremote, FremoteSync, false))
|
||||
checkErr(Sync(r.Fremote, r.Fremote, false))
|
||||
checkErr(Sync(FremoteSync, FremoteSync, false))
|
||||
}
|
||||
|
||||
// Test with BackupDir set
|
||||
@@ -1145,7 +1219,7 @@ func testSyncBackupDir(t *testing.T, suffix string) {
|
||||
require.NoError(t, err)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(fdst, r.Flocal)
|
||||
err = Sync(fdst, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// one should be moved to the backup dir and the new one installed
|
||||
@@ -1166,7 +1240,7 @@ func testSyncBackupDir(t *testing.T, suffix string) {
|
||||
// This should delete three and overwrite one again, checking
|
||||
// the files got overwritten correctly in backup-dir
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(fdst, r.Flocal)
|
||||
err = Sync(fdst, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// one should be moved to the backup dir and the new one installed
|
||||
@@ -1203,7 +1277,7 @@ func TestSyncUTFNorm(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Fremote, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file, but kept the
|
||||
@@ -1229,7 +1303,7 @@ func TestSyncImmutable(t *testing.T) {
|
||||
|
||||
// Should succeed
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
@@ -1241,7 +1315,7 @@ func TestSyncImmutable(t *testing.T) {
|
||||
|
||||
// Should fail with ErrorImmutableModified and not modify local or remote files
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
assert.EqualError(t, err, fs.ErrorImmutableModified.Error())
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
@@ -1277,6 +1351,6 @@ func TestAbort(t *testing.T) {
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
assert.Equal(t, accounting.ErrorMaxTransferLimitReached, err)
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package fs
|
||||
|
||||
// Version of rclone
|
||||
var Version = "v1.46"
|
||||
var Version = "v1.46-DEV"
|
||||
|
||||
@@ -138,3 +138,7 @@ backends:
|
||||
remote: "TestUnion:"
|
||||
subdir: false
|
||||
fastlist: false
|
||||
- backend: "koofr"
|
||||
remote: "TestKoofr:"
|
||||
subdir: false
|
||||
fastlist: false
|
||||
|
||||
go.mod (58 lines changed)
@@ -2,65 +2,59 @@ module github.com/ncw/rclone

require (
	bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
-	cloud.google.com/go v0.33.1 // indirect
+	cloud.google.com/go v0.36.0 // indirect
	github.com/Azure/azure-pipeline-go v0.1.8
-	github.com/Azure/azure-storage-blob-go v0.0.0-20181023070848-cf01652132cc
+	github.com/Azure/azure-storage-blob-go v0.0.0-20190123011202-457680cc0804
	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
	github.com/Unknwon/goconfig v0.0.0-20181105214110-56bd8ab18619
-	github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6
+	github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4
	github.com/abbot/go-http-auth v0.4.0
	github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5
-	github.com/aws/aws-sdk-go v1.15.81
+	github.com/aws/aws-sdk-go v1.16.31
	github.com/billziss-gh/cgofuse v1.1.0
-	github.com/coreos/bbolt v0.0.0-20180318001526-af9db2027c98
+	github.com/coreos/bbolt v1.3.2
	github.com/cpuguy83/go-md2man v1.0.8 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/djherbis/times v1.1.0
+	github.com/djherbis/times v1.2.0
	github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible
	github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 // indirect
	github.com/goftp/server v0.0.0-20190111142836-88de73f463af
	github.com/google/go-querystring v1.0.0 // indirect
	github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/jlaffaye/ftp v0.0.0-20181101112434-47f21d10f0ee
-	github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect
+	github.com/jlaffaye/ftp v0.0.0-20190126081051-8019e6774408
	github.com/jtolds/gls v4.2.1+incompatible // indirect
	github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect
+	github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a // indirect
+	github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff
	github.com/kr/fs v0.1.0 // indirect
	github.com/kr/pretty v0.1.0 // indirect
-	github.com/mattn/go-runewidth v0.0.3 // indirect
+	github.com/mattn/go-runewidth v0.0.4 // indirect
	github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
	github.com/ncw/swift v1.0.44
-	github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed
+	github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d
	github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
	github.com/patrickmn/go-cache v2.1.0+incompatible
	github.com/pengsrc/go-shared v0.2.0 // indirect
-	github.com/pkg/errors v0.8.0
-	github.com/pkg/sftp v1.8.3
-	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/pkg/errors v0.8.1
+	github.com/pkg/sftp v1.10.0
	github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46
	github.com/russross/blackfriday v1.5.2 // indirect
	github.com/sevlyar/go-daemon v0.1.4
	github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 // indirect
	github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd // indirect
-	github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c
-	github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect
+	github.com/skratchdot/open-golang v0.0.0-20190104022628-a2dfa6d0dab6
+	github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 // indirect
	github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c // indirect
-	github.com/spf13/cobra v0.0.3
+	github.com/spf13/cobra v0.0.4-0.20190109003409-7547e83b2d85
	github.com/spf13/pflag v1.0.3
-	github.com/stretchr/testify v1.2.2
+	github.com/stretchr/testify v1.3.0
	github.com/t3rm1n4l/go-mega v0.0.0-20190205172012-55a226cf41da
	github.com/xanzy/ssh-agent v0.2.0
	github.com/yunify/qingstor-sdk-go v2.2.15+incompatible
-	golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613
-	golang.org/x/net v0.0.0-20181114220301-adae6a3d119a
-	golang.org/x/oauth2 v0.0.0-20181120190819-8f65e3013eba
-	golang.org/x/sync v0.0.0-20181108010431-42b317875d0f
-	golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b
-	golang.org/x/text v0.3.0
+	go.etcd.io/bbolt v1.3.2 // indirect
+	golang.org/x/crypto v0.0.0-20190208162236-193df9c0f06f
+	golang.org/x/net v0.0.0-20190206173232-65e2d4e15006
+	golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1
+	golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
+	golang.org/x/sys v0.0.0-20190204203706-41f3e6584952
+	golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2
	golang.org/x/time v0.0.0-20181108054448-85acf8d2951c
-	google.golang.org/api v0.0.0-20181120235003-faade3cbb06a
-	google.golang.org/appengine v1.3.0 // indirect
+	google.golang.org/api v0.1.0
	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
-	gopkg.in/yaml.v2 v2.2.1
+	gopkg.in/yaml.v2 v2.2.2
)

go.sum (223 lines changed)
@@ -1,152 +1,265 @@
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669 h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw=
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||
cloud.google.com/go v0.33.1 h1:fmJQWZ1w9PGkHR1YL/P7HloDvqlmKQ4Vpb7PC2e+aCk=
|
||||
cloud.google.com/go v0.33.1/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.36.0 h1:+aCSj7tOo2LODWVEuZDZeGCckdt6MlSF+X/rB3wUiS8=
|
||||
cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40=
|
||||
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
|
||||
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
|
||||
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
|
||||
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/Azure/azure-pipeline-go v0.1.8 h1:KmVRa8oFMaargVesEuuEoiLCQ4zCCwQ8QX/xg++KS20=
|
||||
github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20181023070848-cf01652132cc h1:BElWmFfsryQD72OcovStKpkIcd4e9ozSkdsTNQDSHGk=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20181023070848-cf01652132cc/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20190123011202-457680cc0804 h1:QjGHsWFbJyl312t0BtgkmZy2TTYA++FF0UakGbr3ZhQ=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20190123011202-457680cc0804/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Unknwon/goconfig v0.0.0-20181105214110-56bd8ab18619 h1:6X8iB881g299aNEv6KXrcjL31iLOH7yA6NXoQX+MbDg=
|
||||
github.com/Unknwon/goconfig v0.0.0-20181105214110-56bd8ab18619/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
|
||||
github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6 h1:UCQe3W9LxwL2ff5r0PqQfS6Oe5MCKpIH8twfK/dH9mw=
|
||||
github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
|
||||
github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4 h1:mK1/QgFPU4osbhjJ26B1w738kjQHaGJcon8uCLMS8fk=
github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5 h1:lmyFvZXNGOmsKCYXNwzDLWafnxeewxsFwdsvTvSC1sg=
github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5/go.mod h1:DGqLjaZ3ziKKNRt+U5Q9PLWJ52Q/4rxfaaH/b3QYKaE=
github.com/aws/aws-sdk-go v1.15.81 h1:va7uoFaV9uKAtZ6BTmp1u7paoMsizYRRLvRuoC07nQ8=
github.com/aws/aws-sdk-go v1.15.81/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/aws/aws-sdk-go v1.16.31 h1:bE4FW2uulhXiAaF4Guw0OzX9gBZ4iWvXWe6VT8Jxr28=
github.com/aws/aws-sdk-go v1.16.31/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/billziss-gh/cgofuse v1.1.0 h1:tATn9ZDvuPcOVlvR4tJitGHgAqy1y18+4mKmRfdfjec=
github.com/billziss-gh/cgofuse v1.1.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
github.com/coreos/bbolt v0.0.0-20180318001526-af9db2027c98 h1:0gQU5Ebjs1V8Ow5bBzxZzr0peNjJILSkSb30IfZtshQ=
github.com/coreos/bbolt v0.0.0-20180318001526-af9db2027c98/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/djherbis/times v1.1.0 h1:NFhBDODme0XNX+/5ETW9qL6v3Ty57psiXIQBrzzg44E=
github.com/djherbis/times v1.1.0/go.mod h1:CGMZlo255K5r4Yw0b9RRfFQpM2y7uOmxg4jm9HsaVf8=
github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible h1:ZFvUIiBbGhDY5zF8yjLoWhUAYs7uDodUpbvTS5oelDE=
github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
github.com/dropbox/dropbox-sdk-go-unofficial v5.0.0+incompatible h1:FQu9Ef2dkC8g2rQmcQmpXXeoRegXHODBfveKKZu6+e8=
github.com/dropbox/dropbox-sdk-go-unofficial v5.0.1-0.20181205034806-56e5f6595305+incompatible h1:4HSS6BiPqvgsn/zrwt6KOYY+mw153zmhvewZIRh1+Ds=
github.com/dropbox/dropbox-sdk-go-unofficial v5.0.1-0.20181205034806-56e5f6595305+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
github.com/djherbis/times v1.2.0 h1:xANXjsC/iBqbO00vkWlYwPWgBgEVU6m6AFYg0Pic+Mc=
github.com/djherbis/times v1.2.0/go.mod h1:CGMZlo255K5r4Yw0b9RRfFQpM2y7uOmxg4jm9HsaVf8=
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible h1:9jnukMIowLSo3SY7+GTwxmYJv4QC0LxXbo97zHWCyoc=
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 h1:cC0Hbb+18DJ4i6ybqDybvj4wdIDS4vnD0QEci98PgM8=
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9/go.mod h1:GpOj6zuVBG3Inr9qjEnuVTgBlk2lZ1S9DcoFiXWyKss=
github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1 h1:WjgeEHEDLGx56ndxS6FYi6qFjZGajSVHPuEPdpJ60cI=
github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
github.com/goftp/server v0.0.0-20190111142836-88de73f463af h1:PJxb1aA1z+Ohy2j28L92+ng9phXpZVFRFbPkfmJcRGo=
github.com/goftp/server v0.0.0-20190111142836-88de73f463af/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jlaffaye/ftp v0.0.0-20181101112434-47f21d10f0ee h1:oCvgfeGIc6GipidJVyG0Hd9R/w6TO8bBYyJg15ZgJkw=
github.com/jlaffaye/ftp v0.0.0-20181101112434-47f21d10f0ee/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jlaffaye/ftp v0.0.0-20190126081051-8019e6774408 h1:9AeqmB6KVEJ7GQU985MGQc7Mtxz1+C+JZkgqBnUWqMU=
github.com/jlaffaye/ftp v0.0.0-20190126081051-8019e6774408/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a h1:W+gnfphB7WpRj0rbTF40e3edULfri4fou2kUFw6AF3A=
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a/go.mod h1:3xszwh+rNrYk1r9SStc4iJ326gne1OaBcrdB1ACsbzI=
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff h1:GlfzG8bgyoJYz+5sMvGpYnHrg4veNVNnDGuE9hTEMHk=
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff/go.mod h1:MRAz4Gsxd+OzrZ0owwrUHc0zLESL+1Y5syqK/sJxK2A=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zXP2Ew+9VupSpfybr+TxmjdH0=
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
github.com/ncw/swift v1.0.42 h1:ztvRb6hs52IHOcaYt73f9lXYLIeIuWgdooRDhdyllGI=
github.com/ncw/swift v1.0.42/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/ncw/swift v1.0.43 h1:TZn2l/bPV0CqG+/G5BFh/ROWnyX7dL2D0URaOjNQRsw=
github.com/ncw/swift v1.0.43/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/ncw/swift v1.0.44 h1:EKvOTvUxElbpDWqxsyVaVGvc2IfuOqQnRmjnR2AGhQ4=
github.com/ncw/swift v1.0.44/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed h1:bAVGG6B+R5qpSylrrA+BAMrzYkdAoiTaKPVxRB+4cyM=
github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pengsrc/go-shared v0.2.0 h1:Ho86LhaXOYgv9FjBmIp5CO0LmaIj49H2HZhYh0+7uW8=
github.com/pengsrc/go-shared v0.2.0/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.8.3 h1:9jSe2SxTM8/3bXZjtqnkgTBW+lA8db0knZJyns7gpBA=
github.com/pkg/sftp v1.8.3/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.0 h1:DGA1KlA9esU6WcicH+P8PxFZOl15O6GYtab1cIJdOlE=
github.com/pkg/sftp v1.10.0/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46 h1:w2CpS5muK+jyydnmlkqpAhzKmHmMBzBkfYUDjQNS1Dk=
github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46/go.mod h1:U2bmx0hDj8EyDdcxmD5t3XHDnBFnyNNc22n1R4008eM=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sevlyar/go-daemon v0.1.4 h1:Ayxp/9SNHwPBjV+kKbnHl2ch6rhxTu08jfkGkoxgULQ=
github.com/sevlyar/go-daemon v0.1.4/go.mod h1:6dJpPatBT9eUwM5VCw9Bt6CdX9Tk6UWvhW3MebLDRKE=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c h1:fyKiXKO1/I/B6Y2U8T7WdQGWzwehOuGIrljPtt7YTTI=
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/skratchdot/open-golang v0.0.0-20190104022628-a2dfa6d0dab6 h1:cGT4dcuEyBwwu/v6tosyqcDp2yoIo/LwjMGixUvg3nU=
github.com/skratchdot/open-golang v0.0.0-20190104022628-a2dfa6d0dab6/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.4-0.20190109003409-7547e83b2d85 h1:RghwryY75x76zKqO9v7NF+9lcmfW1/RNZBfqK4LSCKE=
github.com/spf13/cobra v0.0.4-0.20190109003409-7547e83b2d85/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/t3rm1n4l/go-mega v0.0.0-20180817194457-854bf31d998b h1:Yt/fB2INfWb29Vcya4X0BNCLmObKmDdt0o0IndFzEY8=
github.com/t3rm1n4l/go-mega v0.0.0-20180817194457-854bf31d998b/go.mod h1:ObGZcW2yPzFXEsaTZVMgLKhdUSHMYM3aEDE/u7YnfU8=
github.com/t3rm1n4l/go-mega v0.0.0-20190204171941-1b8ac3503be8 h1:pRD8qAFGq7u9wK/lTg64wkdpCZh4lsTvRoEzQWWLMGE=
github.com/t3rm1n4l/go-mega v0.0.0-20190204171941-1b8ac3503be8/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/t3rm1n4l/go-mega v0.0.0-20190205172012-55a226cf41da h1:hWHAUYEtxnj8tb2pHj5WPGEIE8leSi/3cMND8fUjsBE=
github.com/t3rm1n4l/go-mega v0.0.0-20190205172012-55a226cf41da/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible h1:/Z0q3/eSMoPYAuRmhjWtuGSmVVciFC6hfm3yfCKuvz0=
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible/go.mod h1:w6wqLDQ5bBTzxGJ55581UrSwLrsTAsdo9N6yX/8d9RY=
golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869 h1:kkXA53yGe04D0adEYJwEVQjeBppL01Exg+fnMjfUraU=
golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613 h1:MQ/ZZiDsUapFFiMS+vzwXkCTeEKaum+Do5rINYJDmxc=
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190208162236-193df9c0f06f h1:ETU2VEl7TnT5bl7IvuKEzTDpplg5wzGYsOCAPhdoEIg=
golang.org/x/crypto v0.0.0-20190208162236-193df9c0f06f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20181120190819-8f65e3013eba h1:YDkOrzGLLYybtuP6ZgebnO4OWYEYVMFSniazXsxrFN8=
golang.org/x/oauth2 v0.0.0-20181120190819-8f65e3013eba/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1 h1:VeAkjQVzKLmu+JnFcK96TPbkuaTIqwGGAzQ9hgwPjVg=
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b h1:MQE+LT/ABUuuvEZ+YQAMSXindAdUh7slEmAkup74op4=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952 h1:FDfvYgoVsA7TTZSbgiqjAbfPbK47CNHdWl3h/PJtii0=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
google.golang.org/api v0.0.0-20181120235003-faade3cbb06a h1:yMfgT1baklxtECXVk3UtZBELVXtVhDbK3/7xLFkFypw=
google.golang.org/api v0.0.0-20181120235003-faade3cbb06a/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0 h1:FBSsiFRMz3LBeXIomRnVzrQwSDj4ibvcRexLG0LZGQk=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
@@ -13,23 +13,32 @@ import (
)

var (
	fns        []func()
	fns          = make(map[FnHandle]bool)
	fnsMutex     sync.Mutex
	exitChan     chan os.Signal
	exitOnce     sync.Once
	registerOnce sync.Once
)

// Register a function to be called on exit
func Register(fn func()) {
	fns = append(fns, fn)
// FnHandle is the type of the handle returned by function `Register`
// that can be used to unregister an at-exit function
type FnHandle *func()

// Register a function to be called on exit.
// Returns a handle which can be used to unregister the function with `Unregister`.
func Register(fn func()) FnHandle {
	fnsMutex.Lock()
	fns[&fn] = true
	fnsMutex.Unlock()

	// Run AtExit handlers on SIGINT or SIGTERM so everything gets
	// tidied up properly
	registerOnce.Do(func() {
		exitChan = make(chan os.Signal, 1)
		signal.Notify(exitChan, os.Interrupt) // syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT
		go func() {
			sig, closed := <-exitChan
			if closed || sig == nil {
			sig := <-exitChan
			if sig == nil {
				return
			}
			fs.Infof(nil, "Signal received: %s", sig)
@@ -38,6 +47,15 @@ func Register(fn func()) {
			os.Exit(0)
		}()
	})

	return &fn
}

// Unregister a function using the handle returned by `Register`
func Unregister(handle FnHandle) {
	fnsMutex.Lock()
	defer fnsMutex.Unlock()
	delete(fns, handle)
}

// IgnoreSignals disables the signal handler and prevents Run from being executed automatically
@@ -53,8 +71,10 @@ func IgnoreSignals() {
// Run all the at exit functions if they haven't been run already
func Run() {
	exitOnce.Do(func() {
		for _, fn := range fns {
			fn()
		fnsMutex.Lock()
		defer fnsMutex.Unlock()
		for fnHandle := range fns {
			(*fnHandle)()
		}
	})
}
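The diff above (the `AtExit` comments and `fs.Infof` call suggest this is rclone's lib/atexit package) swaps the append-only `fns` slice for a map keyed by a `FnHandle` (a pointer to the registered function), which is what makes `Unregister` possible and why `Run` now iterates the map keys under the mutex. A minimal sketch of the resulting caller-side pattern — only `Register` and `Unregister` come from the diff, the cleanup body is hypothetical:

package main

import "github.com/ncw/rclone/lib/atexit"

func main() {
	// Register returns a *func() handle identifying this exact closure.
	handle := atexit.Register(func() {
		// hypothetical cleanup, e.g. deleting a temporary file
	})

	// ... work that must be tidied up on SIGINT/SIGTERM ...

	// Once the work completes normally the handler can be removed,
	// so it no longer runs at exit.
	atexit.Unregister(handle)
}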
633
lib/encoder/encoder.go
Normal file
@@ -0,0 +1,633 @@
/*
Translate file names for usage on restrictive storage systems

The restricted set of characters are mapped to a unicode equivalent version
(most to their FULLWIDTH variant) to increase compatibility with other
storage systems.
See: http://unicode-search.net/unicode-namesearch.pl?term=FULLWIDTH

Encoders will also quote reserved characters to differentiate between
the raw and encoded forms.
*/

package encoder

import (
	"bytes"
	"fmt"
	"io"
	"strconv"
	"strings"
	"unicode/utf8"
)

const (
	// adding this to any printable ASCII character turns it into the
	// FULLWIDTH variant
	fullOffset = 0xFEE0
	// the first rune of the SYMBOL FOR block for control characters
	symbolOffset = '␀' // SYMBOL FOR NULL
	// QuoteRune is the rune used for quoting reserved characters
	QuoteRune = '‛' // SINGLE HIGH-REVERSED-9 QUOTATION MARK
	// EncodeStandard contains the flags used for the Standard Encoder
	EncodeStandard = EncodeZero | EncodeSlash | EncodeCtl | EncodeDel
	// Standard defines the encoding that is used for paths in- and output by rclone.
	//
	// List of replaced characters:
	//   (0x00)    -> '␀' // SYMBOL FOR NULL
	//   / (slash) -> '／' // FULLWIDTH SOLIDUS
	Standard = MultiEncoder(EncodeStandard)
)

// Possible flags for the MultiEncoder
const (
	EncodeZero        uint = 0         // NUL(0x00)
	EncodeSlash       uint = 1 << iota // /
	EncodeWin                          // :?"*<>|
	EncodeBackSlash                    // \
	EncodeHashPercent                  // #%
	EncodeDel                          // DEL(0x7F)
	EncodeCtl                          // CTRL(0x01-0x1F)
	EncodeLeftSpace                    // Leading SPACE
	EncodeLeftTilde                    // Leading ~
	EncodeRightSpace                   // Trailing SPACE
	EncodeRightPeriod                  // Trailing .
	EncodeInvalidUtf8                  // Invalid UTF-8 bytes
)

// Encoder can transform names to and from the original and translated version.
type Encoder interface {
	// Encode takes a raw name and substitutes any reserved characters and
	// patterns in it
	Encode(string) string
	// Decode takes a name and undoes any substitutions made by Encode
	Decode(string) string

	// FromStandardPath takes a / separated path in Standard encoding
	// and converts it to a / separated path in this encoding.
	FromStandardPath(string) string
	// FromStandardName takes name in Standard encoding and converts
	// it to this encoding.
	FromStandardName(string) string
	// ToStandardPath takes a / separated path in this encoding
	// and converts it to a / separated path in Standard encoding.
	ToStandardPath(string) string
	// ToStandardName takes name in this encoding and converts
	// it to Standard encoding.
	ToStandardName(string) string
}

// MultiEncoder is a configurable Encoder. The Encode* constants in this
// package can be combined using bitwise or (|) to enable handling of multiple
// character classes
type MultiEncoder uint

// Encode takes a raw name and substitutes any reserved characters and
// patterns in it
func (mask MultiEncoder) Encode(in string) string {
	var (
		encodeWin            = uint(mask)&EncodeWin != 0
		encodeSlash          = uint(mask)&EncodeSlash != 0
		encodeBackSlash      = uint(mask)&EncodeBackSlash != 0
		encodeHashPercent    = uint(mask)&EncodeHashPercent != 0
		encodeDel            = uint(mask)&EncodeDel != 0
		encodeCtl            = uint(mask)&EncodeCtl != 0
		encodeLeftSpace      = uint(mask)&EncodeLeftSpace != 0
		encodeLeftTilde      = uint(mask)&EncodeLeftTilde != 0
		encodeRightSpace     = uint(mask)&EncodeRightSpace != 0
		encodeRightPeriod    = uint(mask)&EncodeRightPeriod != 0
		encodeInvalidUnicode = uint(mask)&EncodeInvalidUtf8 != 0
	)

	// handle prefix only replacements
	prefix := ""
	if encodeLeftSpace && len(in) > 0 { // Leading SPACE
		if in[0] == ' ' {
			prefix, in = "␠", in[1:] // SYMBOL FOR SPACE
		} else if r, l := utf8.DecodeRuneInString(in); r == '␠' { // SYMBOL FOR SPACE
			prefix, in = string(QuoteRune)+"␠", in[l:] // SYMBOL FOR SPACE
		}
	}
	if encodeLeftTilde && len(in) > 0 { // Leading ~
		if in[0] == '~' {
			prefix, in = string('~'+fullOffset), in[1:] // FULLWIDTH TILDE
		} else if r, l := utf8.DecodeRuneInString(in); r == '~'+fullOffset {
			prefix, in = string(QuoteRune)+string('~'+fullOffset), in[l:] // FULLWIDTH TILDE
		}
	}
	// handle suffix only replacements
	suffix := ""
	if encodeRightSpace && len(in) > 0 { // Trailing SPACE
		if in[len(in)-1] == ' ' {
			suffix, in = "␠", in[:len(in)-1] // SYMBOL FOR SPACE
		} else if r, l := utf8.DecodeLastRuneInString(in); r == '␠' {
			suffix, in = string(QuoteRune)+"␠", in[:len(in)-l] // SYMBOL FOR SPACE
		}
	}
	if encodeRightPeriod && len(in) > 0 { // Trailing .
		if in[len(in)-1] == '.' {
			suffix, in = "．", in[:len(in)-1] // FULLWIDTH FULL STOP
		} else if r, l := utf8.DecodeLastRuneInString(in); r == '．' {
			suffix, in = string(QuoteRune)+"．", in[:len(in)-l] // FULLWIDTH FULL STOP
		}
	}
	index := 0
	if prefix == "" && suffix == "" {
		// find the first rune which (most likely) needs to be replaced
		index = strings.IndexFunc(in, func(r rune) bool {
			switch r {
			case 0, '␀', QuoteRune, utf8.RuneError:
				return true
			}
			if encodeWin { // :?"*<>|
				switch r {
				case '*', '<', '>', '?', ':', '|', '"',
					'＊', '＜', '＞', '？', '：', '｜', '＂':
					return true
				}
			}
			if encodeSlash { // /
				switch r {
				case '/',
					'／':
					return true
				}
			}
			if encodeBackSlash { // \
				switch r {
				case '\\',
					'＼':
					return true
				}
			}
			if encodeHashPercent { // #%
				switch r {
				case '#', '%',
					'＃', '％':
					return true
				}
			}
			if encodeDel { // DEL(0x7F)
				switch r {
				case rune(0x7F), '␡':
					return true
				}
			}
			if encodeCtl { // CTRL(0x01-0x1F)
				if r >= 1 && r <= 0x1F {
					return true
				} else if r > symbolOffset && r <= symbolOffset+0x1F {
					return true
				}
			}
			return false
		})
	}
	// nothing to replace, return input
	if index == -1 {
		return in
	}

	var out bytes.Buffer
	out.Grow(len(in) + len(prefix) + len(suffix))
	out.WriteString(prefix)
	// copy the clean part of the input and skip it
	out.WriteString(in[:index])
	in = in[index:]

	for i, r := range in {
		switch r {
		case 0:
			out.WriteRune(symbolOffset)
			continue
		case '␀', QuoteRune:
			out.WriteRune(QuoteRune)
			out.WriteRune(r)
			continue
		case utf8.RuneError:
			if encodeInvalidUnicode {
				// only encode invalid sequences and not utf8.RuneError
				if i+3 > len(in) || in[i:i+3] != string(utf8.RuneError) {
					_, l := utf8.DecodeRuneInString(in[i:])
					appendQuotedBytes(&out, in[i:i+l])
					continue
				}
			} else {
				// append the real bytes instead of utf8.RuneError
				_, l := utf8.DecodeRuneInString(in[i:])
				out.WriteString(in[i : i+l])
				continue
			}
		}
		if encodeWin { // :?"*<>|
			switch r {
			case '*', '<', '>', '?', ':', '|', '"':
				out.WriteRune(r + fullOffset)
				continue
			case '＊', '＜', '＞', '？', '：', '｜', '＂':
				out.WriteRune(QuoteRune)
				out.WriteRune(r)
				continue
			}
		}
		if encodeSlash { // /
			switch r {
			case '/':
				out.WriteRune(r + fullOffset)
				continue
			case '／':
				out.WriteRune(QuoteRune)
				out.WriteRune(r)
				continue
			}
		}
		if encodeBackSlash { // \
			switch r {
			case '\\':
				out.WriteRune(r + fullOffset)
				continue
			case '＼':
				out.WriteRune(QuoteRune)
				out.WriteRune(r)
				continue
			}
		}
		if encodeHashPercent { // #%
			switch r {
			case '#', '%':
				out.WriteRune(r + fullOffset)
				continue
			case '＃', '％':
				out.WriteRune(QuoteRune)
				out.WriteRune(r)
				continue
			}
		}
		if encodeDel { // DEL(0x7F)
			switch r {
			case rune(0x7F):
				out.WriteRune('␡') // SYMBOL FOR DELETE
				continue
			case '␡':
				out.WriteRune(QuoteRune)
				out.WriteRune(r)
				continue
			}
		}
		if encodeCtl { // CTRL(0x01-0x1F)
			if r >= 1 && r <= 0x1F {
				out.WriteRune('␀' + r) // SYMBOL FOR NULL
				continue
			} else if r > symbolOffset && r <= symbolOffset+0x1F {
				out.WriteRune(QuoteRune)
				out.WriteRune(r)
				continue
			}
		}
		out.WriteRune(r)
	}
	out.WriteString(suffix)
	return out.String()
}

// Decode takes a name and undoes any substitutions made by Encode
func (mask MultiEncoder) Decode(in string) string {
	var (
		encodeWin            = uint(mask)&EncodeWin != 0
		encodeSlash          = uint(mask)&EncodeSlash != 0
		encodeBackSlash      = uint(mask)&EncodeBackSlash != 0
		encodeHashPercent    = uint(mask)&EncodeHashPercent != 0
		encodeDel            = uint(mask)&EncodeDel != 0
		encodeCtl            = uint(mask)&EncodeCtl != 0
		encodeLeftSpace      = uint(mask)&EncodeLeftSpace != 0
		encodeLeftTilde      = uint(mask)&EncodeLeftTilde != 0
		encodeRightSpace     = uint(mask)&EncodeRightSpace != 0
		encodeRightPeriod    = uint(mask)&EncodeRightPeriod != 0
		encodeInvalidUnicode = uint(mask)&EncodeInvalidUtf8 != 0
	)

	// handle prefix only replacements
	prefix := ""
	if r, l1 := utf8.DecodeRuneInString(in); encodeLeftSpace && r == '␠' { // SYMBOL FOR SPACE
		prefix, in = " ", in[l1:]
	} else if encodeLeftTilde && r == '～' { // FULLWIDTH TILDE
		prefix, in = "~", in[l1:]
	} else if r == QuoteRune {
		if r, l2 := utf8.DecodeRuneInString(in[l1:]); encodeLeftSpace && r == '␠' { // SYMBOL FOR SPACE
			prefix, in = "␠", in[l1+l2:]
		} else if encodeLeftTilde && r == '～' { // FULLWIDTH TILDE
			prefix, in = "～", in[l1+l2:]
		}
	}

	// handle suffix only replacements
	suffix := ""
	if r, l := utf8.DecodeLastRuneInString(in); encodeRightSpace && r == '␠' { // SYMBOL FOR SPACE
		in = in[:len(in)-l]
		if r, l2 := utf8.DecodeLastRuneInString(in); r == QuoteRune {
			suffix, in = "␠", in[:len(in)-l2]
		} else {
			suffix = " "
		}
	} else if encodeRightPeriod && r == '．' { // FULLWIDTH FULL STOP
		in = in[:len(in)-l]
		if r, l2 := utf8.DecodeLastRuneInString(in); r == QuoteRune {
			suffix, in = "．", in[:len(in)-l2]
		} else {
			suffix = "."
		}
	}
	index := 0
	if prefix == "" && suffix == "" {
		// find the first rune which (most likely) needs to be replaced
		index = strings.IndexFunc(in, func(r rune) bool {
			switch r {
			case '␀', QuoteRune:
				return true
			}
			if encodeWin { // :?"*<>|
				switch r {
				case '＊', '＜', '＞', '？', '：', '｜', '＂':
					return true
				}
			}
			if encodeSlash { // /
				switch r {
				case '／':
					return true
				}
			}
			if encodeBackSlash { // \
				switch r {
				case '＼':
					return true
				}
			}
			if encodeHashPercent { // #%
				switch r {
				case '＃', '％':
					return true
				}
			}
			if encodeDel { // DEL(0x7F)
				switch r {
				case '␡':
					return true
				}
			}
			if encodeCtl { // CTRL(0x01-0x1F)
				if r > symbolOffset && r <= symbolOffset+0x1F {
					return true
				}
			}

			return false
		})
	}
	// nothing to replace, return input
	if index == -1 {
		return in
	}

	var out bytes.Buffer
	out.Grow(len(in))
	out.WriteString(prefix)
	// copy the clean part of the input and skip it
	out.WriteString(in[:index])
	in = in[index:]
	var unquote, unquoteNext, skipNext bool

	for i, r := range in {
		if skipNext {
			skipNext = false
			continue
		}
		unquote, unquoteNext = unquoteNext, false
		switch r {
		case '␀': // SYMBOL FOR NULL
			if unquote {
				out.WriteRune(r)
			} else {
				out.WriteRune(0)
			}
			continue
		case QuoteRune:
			if unquote {
				out.WriteRune(r)
			} else {
				unquoteNext = true
			}
			continue
		}
		if encodeWin { // :?"*<>|
			switch r {
			case '＊', '＜', '＞', '？', '：', '｜', '＂':
				if unquote {
					out.WriteRune(r)
				} else {
					out.WriteRune(r - fullOffset)
				}
				continue
			}
		}
		if encodeSlash { // /
			switch r {
			case '／': // FULLWIDTH SOLIDUS
				if unquote {
					out.WriteRune(r)
				} else {
					out.WriteRune(r - fullOffset)
				}
				continue
			}
		}
		if encodeBackSlash { // \
			switch r {
			case '＼': // FULLWIDTH REVERSE SOLIDUS
				if unquote {
					out.WriteRune(r)
				} else {
					out.WriteRune(r - fullOffset)
				}
				continue
			}
		}
		if encodeHashPercent { // #%
			switch r {
			case '＃', '％':
				if unquote {
					out.WriteRune(r)
				} else {
					out.WriteRune(r - fullOffset)
				}
				continue
			}
		}
		if encodeDel { // DEL(0x7F)
			switch r {
			case '␡': // SYMBOL FOR DELETE
				if unquote {
					out.WriteRune(r)
				} else {
					out.WriteRune(0x7F)
				}
				continue
			}
		}
		if encodeCtl { // CTRL(0x01-0x1F)
			if r > symbolOffset && r <= symbolOffset+0x1F {
				if unquote {
					out.WriteRune(r)
				} else {
					out.WriteRune(r - symbolOffset)
				}
				continue
			}
		}
		if unquote {
			if encodeInvalidUnicode {
				skipNext = appendUnquotedByte(&out, in[i:])
				if skipNext {
					continue
				}
			}
			out.WriteRune(QuoteRune)
		}
		switch r {
		case utf8.RuneError:
			// append the real bytes instead of utf8.RuneError
			_, l := utf8.DecodeRuneInString(in[i:])
			out.WriteString(in[i : i+l])
			continue
		}

		out.WriteRune(r)
	}
	if unquoteNext {
		out.WriteRune(QuoteRune)
	}
	out.WriteString(suffix)
	return out.String()
}

// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in this encoding.
func (mask MultiEncoder) FromStandardPath(s string) string {
	return FromStandardPath(mask, s)
}

// FromStandardName takes name in Standard encoding and converts
// it to this encoding.
func (mask MultiEncoder) FromStandardName(s string) string {
	return FromStandardName(mask, s)
}

// ToStandardPath takes a / separated path in this encoding
// and converts it to a / separated path in Standard encoding.
func (mask MultiEncoder) ToStandardPath(s string) string {
	return ToStandardPath(mask, s)
}

// ToStandardName takes name in this encoding and converts
// it to Standard encoding.
func (mask MultiEncoder) ToStandardName(s string) string {
	return ToStandardName(mask, s)
}

func appendQuotedBytes(w io.Writer, s string) {
	for _, b := range []byte(s) {
		_, _ = fmt.Fprintf(w, string(QuoteRune)+"%02X", b)
	}
}
func appendUnquotedByte(w io.Writer, s string) bool {
	if len(s) < 2 {
		return false
	}
	u, err := strconv.ParseUint(s[:2], 16, 8)
	if err != nil {
		return false
	}
	n, _ := w.Write([]byte{byte(u)})
	return n == 1
}

type identity struct{}

func (identity) Encode(in string) string { return in }
func (identity) Decode(in string) string { return in }

func (i identity) FromStandardPath(s string) string {
	return FromStandardPath(i, s)
}
func (i identity) FromStandardName(s string) string {
	return FromStandardName(i, s)
}
func (i identity) ToStandardPath(s string) string {
	return ToStandardPath(i, s)
}
func (i identity) ToStandardName(s string) string {
	return ToStandardName(i, s)
}

// Identity returns an Encoder that always returns the input value
func Identity() Encoder {
	return identity{}
}

// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in the given encoding.
func FromStandardPath(e Encoder, s string) string {
	if e == Standard {
		return s
	}
	parts := strings.Split(s, "/")
	encoded := make([]string, len(parts))
	changed := false
	for i, p := range parts {
		enc := FromStandardName(e, p)
		changed = changed || enc != p
		encoded[i] = enc
	}
	if !changed {
		return s
	}
	return strings.Join(encoded, "/")
}

// FromStandardName takes name in Standard encoding and converts
// it to the given encoding.
func FromStandardName(e Encoder, s string) string {
	if e == Standard {
		return s
	}
	return e.Encode(Standard.Decode(s))
}

// ToStandardPath takes a / separated path in the given encoding
// and converts it to a / separated path in Standard encoding.
func ToStandardPath(e Encoder, s string) string {
	if e == Standard {
		return s
	}
	parts := strings.Split(s, "/")
	encoded := make([]string, len(parts))
	changed := false
	for i, p := range parts {
		dec := ToStandardName(e, p)
		changed = changed || dec != p
		encoded[i] = dec
	}
	if !changed {
		return s
	}
	return strings.Join(encoded, "/")
}

// ToStandardName takes name in the given encoding and converts
// it to Standard encoding.
func ToStandardName(e Encoder, s string) string {
	if e == Standard {
		return s
	}
	return Standard.Encode(e.Decode(s))
}
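The new lib/encoder/encoder.go above is entirely mask-driven: Encode maps each enabled character class to a FULLWIDTH (or SYMBOL FOR) counterpart and prefixes QuoteRune to runes that already look encoded, so Decode can round-trip both raw and already-encoded names. A small illustrative sketch of combining the flags — the mask below is made up for the example, not one the file defines:

package main

import (
	"fmt"

	"github.com/ncw/rclone/lib/encoder"
)

func main() {
	// hypothetical mask for a backend that rejects Windows-reserved
	// characters and backslashes
	enc := encoder.MultiEncoder(encoder.EncodeStandard |
		encoder.EncodeWin |
		encoder.EncodeBackSlash)

	name := `report:2019\draft?`
	encoded := enc.Encode(name) // reserved runes become FULLWIDTH variants
	fmt.Println(encoded)
	fmt.Println(enc.Decode(encoded) == name) // true: the mapping round-trips
}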
2217
lib/encoder/encoder_cases_test.go
Normal file
File diff suppressed because it is too large
262
lib/encoder/encoder_test.go
Normal file
@@ -0,0 +1,262 @@
|
||||
package encoder
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type testCase struct {
|
||||
mask uint
|
||||
in string
|
||||
out string
|
||||
}
|
||||
|
||||
func TestEncodeSingleMask(t *testing.T) {
|
||||
for i, tc := range testCasesSingle {
|
||||
e := MultiEncoder(tc.mask)
|
||||
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
|
||||
got := e.Encode(tc.in)
|
||||
if got != tc.out {
|
||||
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
|
||||
}
|
||||
got2 := e.Decode(got)
|
||||
if got2 != tc.in {
|
||||
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeSingleMaskEdge(t *testing.T) {
|
||||
for i, tc := range testCasesSingleEdge {
|
||||
e := MultiEncoder(tc.mask)
|
||||
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
|
||||
got := e.Encode(tc.in)
|
||||
if got != tc.out {
|
||||
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
|
||||
}
|
||||
got2 := e.Decode(got)
|
||||
if got2 != tc.in {
|
||||
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeInvalidUnicode(t *testing.T) {
|
||||
for i, tc := range []testCase{
|
||||
{
|
||||
mask: EncodeInvalidUtf8,
|
||||
in: "\xBF",
|
||||
out: "‛BF",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8,
|
||||
in: "\xBF\xFE",
|
||||
out: "‛BF‛FE",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8,
|
||||
in: "a\xBF\xFEb",
|
||||
out: "a‛BF‛FEb",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8,
|
||||
in: "a\xBFξ\xFEb",
|
||||
out: "a‛BFξ‛FEb",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8 | EncodeBackSlash,
|
||||
in: "a\xBF\\\xFEb",
|
||||
out: "a‛BF\‛FEb",
|
||||
}, {
|
||||
mask: 0,
|
||||
in: "\xBF",
|
||||
out: "\xBF",
|
||||
}, {
|
||||
mask: 0,
|
||||
in: "\xBF\xFE",
|
||||
out: "\xBF\xFE",
|
||||
}, {
|
||||
mask: 0,
|
||||
in: "a\xBF\xFEb",
|
||||
out: "a\xBF\xFEb",
|
||||
}, {
|
||||
mask: 0,
|
||||
in: "a\xBFξ\xFEb",
|
||||
out: "a\xBFξ\xFEb",
|
||||
}, {
|
||||
mask: EncodeBackSlash,
|
||||
in: "a\xBF\\\xFEb",
|
||||
out: "a\xBF\\xFEb",
|
||||
},
|
||||
} {
|
||||
e := MultiEncoder(tc.mask)
|
||||
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
|
||||
got := e.Encode(tc.in)
|
||||
if got != tc.out {
|
||||
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
|
||||
}
|
||||
got2 := e.Decode(got)
|
||||
if got2 != tc.in {
|
||||
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
func TestDecodeHalf(t *testing.T) {
|
||||
for i, tc := range []testCase{
|
||||
{
|
||||
mask: 0,
|
||||
in: "‛",
|
||||
out: "‛",
|
||||
}, {
|
||||
mask: 0,
|
||||
in: "‛‛",
|
||||
out: "‛",
|
||||
}, {
|
||||
mask: 0,
|
||||
in: "‛a‛",
|
||||
out: "‛a‛",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8,
|
||||
in: "a‛B‛Eg",
|
||||
out: "a‛B‛Eg",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8,
|
||||
in: "a‛B\‛Eg",
|
||||
out: "a‛B\‛Eg",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8 | EncodeBackSlash,
|
||||
in: "a‛B\‛Eg",
|
||||
out: "a‛B\\‛Eg",
|
||||
},
|
||||
} {
|
||||
e := MultiEncoder(tc.mask)
|
||||
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
|
||||
got := e.Decode(tc.in)
|
||||
if got != tc.out {
|
||||
t.Errorf("Decode(%q) want %q got %q", tc.in, tc.out, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const oneDrive = MultiEncoder(
|
||||
EncodeStandard |
|
||||
EncodeWin |
|
||||
EncodeBackSlash |
|
||||
EncodeHashPercent |
|
||||
EncodeDel |
|
||||
EncodeCtl |
|
||||
EncodeLeftTilde |
|
||||
EncodeRightSpace |
|
||||
EncodeRightPeriod)
|
||||
|
||||
var benchTests = []struct {
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{"", ""},
|
||||
{"abc 123", "abc 123"},
|
||||
{`\*<>?:|#%".~`, `\*<>?:|#%".~`},
|
||||
{`\*<>?:|#%".~/\*<>?:|#%".~`, `\*<>?:|#%".~/\*<>?:|#%".~`},
|
||||
{" leading space", " leading space"},
|
||||
{"~leading tilde", "~leading tilde"},
|
||||
{"trailing dot.", "trailing dot."},
|
||||
{" leading space/ leading space/ leading space", " leading space/ leading space/ leading space"},
|
||||
{"~leading tilde/~leading tilde/~leading tilde", "~leading tilde/~leading tilde/~leading tilde"},
|
||||
{"leading tilde/~leading tilde", "leading tilde/~leading tilde"},
|
||||
{"trailing dot./trailing dot./trailing dot.", "trailing dot./trailing dot./trailing dot."},
|
||||
}
|
||||
|
||||
func benchReplace(b *testing.B, f func(string) string) {
|
||||
for range make([]struct{}, b.N) {
|
||||
for _, test := range benchTests {
|
||||
got := f(test.in)
|
||||
if got != test.out {
|
||||
b.Errorf("Encode(%q) want %q got %q", test.in, test.out, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchRestore(b *testing.B, f func(string) string) {
|
||||
for range make([]struct{}, b.N) {
|
||||
for _, test := range benchTests {
|
||||
got := f(test.out)
|
||||
if got != test.in {
|
||||
b.Errorf("Decode(%q) want %q got %q", got, test.in, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func BenchmarkOneDriveReplaceNew(b *testing.B) {
|
||||
benchReplace(b, oneDrive.Encode)
|
||||
}
|
||||
func BenchmarkOneDriveReplaceOld(b *testing.B) {
|
||||
benchReplace(b, replaceReservedChars)
|
||||
}
|
||||
func BenchmarkOneDriveRestoreNew(b *testing.B) {
|
||||
benchRestore(b, oneDrive.Decode)
|
||||
}
|
||||
func BenchmarkOneDriveRestoreOld(b *testing.B) {
|
||||
benchRestore(b, restoreReservedChars)
|
||||
}
|
||||
|
||||
var (
    charMap = map[rune]rune{
        '\\': '＼', // FULLWIDTH REVERSE SOLIDUS
        '*':  '＊', // FULLWIDTH ASTERISK
        '<':  '＜', // FULLWIDTH LESS-THAN SIGN
        '>':  '＞', // FULLWIDTH GREATER-THAN SIGN
        '?':  '？', // FULLWIDTH QUESTION MARK
        ':':  '：', // FULLWIDTH COLON
        '|':  '｜', // FULLWIDTH VERTICAL LINE
        '#':  '＃', // FULLWIDTH NUMBER SIGN
        '%':  '％', // FULLWIDTH PERCENT SIGN
        '"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
        '.':  '．', // FULLWIDTH FULL STOP
        '~':  '～', // FULLWIDTH TILDE
        ' ':  '␠', // SYMBOL FOR SPACE
    }
    invCharMap           map[rune]rune
    fixEndingInPeriod    = regexp.MustCompile(`\.(/|$)`)
    fixEndingWithSpace   = regexp.MustCompile(` (/|$)`)
    fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
)

func init() {
    // Create inverse charMap
    invCharMap = make(map[rune]rune, len(charMap))
    for k, v := range charMap {
        invCharMap[v] = k
    }
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
    // Folder names can't end with a period '.'
    in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
    // OneDrive for Business file or folder names cannot begin with a tilde '~'
    in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
    // Apparently file names can't end with space either
    in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
    // Encode reserved characters
    return strings.Map(func(c rune) rune {
        if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
            return replacement
        }
        return c
    }, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
    return strings.Map(func(c rune) rune {
        if replacement, ok := invCharMap[c]; ok {
            return replacement
        }
        return c
    }, in)
}
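Taken together, the two helpers are inverses of each other over the charMap table. A minimal round-trip sketch (the path literal is illustrative, not part of the diff):

    // roundTrip demonstrates that restoreReservedChars inverts replaceReservedChars.
    func roundTrip() bool {
        p := `docs/trailing dot./~tilde|pipe`
        enc := replaceReservedChars(p) // "docs/trailing dot．/～tilde｜pipe"
        return restoreReservedChars(enc) == p // true
    }
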
419
lib/encoder/internal/gen/main.go
Normal file
@@ -0,0 +1,419 @@
// +build go1.10

package main

import (
    "fmt"
    "log"
    "math/rand"
    "os"
    "strconv"
    "strings"

    "github.com/ncw/rclone/lib/encoder"
)

const (
    edgeLeft = iota
    edgeRight
)

type mapping struct {
    mask     uint
    src, dst []rune
}
type stringPair struct {
    a, b string
}

const header = `// Code generated by ./internal/gen/main.go. DO NOT EDIT.

` + `//go:generate go run ./internal/gen/main.go

package encoder

`

var maskBits = []struct {
    mask uint
    name string
}{
    {encoder.EncodeZero, "EncodeZero"},
    {encoder.EncodeWin, "EncodeWin"},
    {encoder.EncodeSlash, "EncodeSlash"},
    {encoder.EncodeBackSlash, "EncodeBackSlash"},
    {encoder.EncodeHashPercent, "EncodeHashPercent"},
    {encoder.EncodeDel, "EncodeDel"},
    {encoder.EncodeCtl, "EncodeCtl"},
    {encoder.EncodeLeftSpace, "EncodeLeftSpace"},
    {encoder.EncodeLeftTilde, "EncodeLeftTilde"},
    {encoder.EncodeRightSpace, "EncodeRightSpace"},
    {encoder.EncodeRightPeriod, "EncodeRightPeriod"},
    {encoder.EncodeInvalidUtf8, "EncodeInvalidUtf8"},
}
var edges = []struct {
    mask    uint
    name    string
    edge    int
    orig    rune
    replace rune
}{
    {encoder.EncodeLeftSpace, "EncodeLeftSpace", edgeLeft, ' ', '␠'},
    {encoder.EncodeLeftTilde, "EncodeLeftTilde", edgeLeft, '~', '～'},
    {encoder.EncodeRightSpace, "EncodeRightSpace", edgeRight, ' ', '␠'},
    {encoder.EncodeRightPeriod, "EncodeRightPeriod", edgeRight, '.', '．'},
}

var allMappings = []mapping{{
    encoder.EncodeZero, []rune{
        0,
    }, []rune{
        '␀',
    }}, {
    encoder.EncodeWin, []rune{
        ':', '?', '"', '*', '<', '>', '|',
    }, []rune{
        '：', '？', '＂', '＊', '＜', '＞', '｜',
    }}, {
    encoder.EncodeSlash, []rune{
        '/',
    }, []rune{
        '／',
    }}, {
    encoder.EncodeBackSlash, []rune{
        '\\',
    }, []rune{
        '＼',
    }}, {
    encoder.EncodeHashPercent, []rune{
        '#', '%',
    }, []rune{
        '＃', '％',
    }}, {
    encoder.EncodeDel, []rune{
        0x7F,
    }, []rune{
        '␡',
    }}, {
    encoder.EncodeCtl,
    runeRange(0x01, 0x1F),
    runeRange('␁', '␟'),
}}

var (
    rng = rand.New(rand.NewSource(42))

    printables          = runeRange(0x20, 0x7E)
    fullwidthPrintables = runeRange(0xFF00, 0xFF5E)
    encodables          = collectEncodables(allMappings)
    encoded             = collectEncoded(allMappings)
    greek               = runeRange(0x03B1, 0x03C9)
)

func main() {
    fd, err := os.Create("encoder_cases_test.go")
    fatal(err, "Unable to open encoder_cases_test.go:")
    defer func() {
        fatal(fd.Close(), "Failed to close encoder_cases_test.go:")
    }()
    fatalW(fd.WriteString(header))("Failed to write header:")

    fatalW(fd.WriteString("var testCasesSingle = []testCase{\n\t"))("Write:")
    _i := 0
    i := func() (r int) {
        r, _i = _i, _i+1
        return
    }
    for _, m := range maskBits {
        if len(getMapping(m.mask).src) == 0 {
            continue
        }
        if _i != 0 {
            fatalW(fd.WriteString(" "))("Write:")
        }
        in, out := buildTestString(
            []mapping{getMapping(m.mask)}, // pick
            []mapping{getMapping(0)}, // quote
            printables, fullwidthPrintables, encodables, encoded, greek) // fill
        fatalW(fmt.Fprintf(fd, `{ // %d
mask: %s,
in: %s,
out: %s,
},`, i(), m.name, strconv.Quote(in), strconv.Quote(out)))("Error writing test case:")
    }
    fatalW(fd.WriteString(`
}

var testCasesSingleEdge = []testCase{
`))("Write:")
    _i = 0
    for _, e := range edges {
        if _i != 0 {
            fatalW(fd.WriteString(" "))("Write:")
        }
        fatalW(fmt.Fprintf(fd, `{ // %d
mask: %s,
in: %s,
out: %s,
},`, i(), e.name, strconv.Quote(string(e.orig)), strconv.Quote(string(e.replace))))("Error writing test case:")
        for _, m := range maskBits {
            if len(getMapping(m.mask).src) == 0 {
                continue
            }
            pairs := buildEdgeTestString(
                e.edge, e.orig, e.replace,
                []mapping{getMapping(0), getMapping(m.mask)}, // quote
                printables, fullwidthPrintables, encodables, encoded, greek) // fill
            for _, p := range pairs {
                fatalW(fmt.Fprintf(fd, ` { // %d
mask: %s | %s,
in: %s,
out: %s,
},`, i(), m.name, e.name, strconv.Quote(p.a), strconv.Quote(p.b)))("Error writing test case:")
            }
        }
    }
    fatalW(fmt.Fprintf(fd, ` { // %d
mask: EncodeLeftSpace,
in: "  ",
out: "␠ ",
}, { // %d
mask: EncodeLeftTilde,
in: "~~",
out: "～~",
}, { // %d
mask: EncodeRightSpace,
in: "  ",
out: " ␠",
}, { // %d
mask: EncodeRightPeriod,
in: "..",
out: ".．",
}, { // %d
mask: EncodeLeftSpace | EncodeRightPeriod,
in: " .",
out: "␠．",
}, { // %d
mask: EncodeLeftSpace | EncodeRightSpace,
in: " ",
out: "␠",
}, { // %d
mask: EncodeLeftSpace | EncodeRightSpace,
in: "  ",
out: "␠␠",
}, { // %d
mask: EncodeLeftSpace | EncodeRightSpace,
in: "   ",
out: "␠ ␠",
},
}
`, i(), i(), i(), i(), i(), i(), i(), i()))("Error writing test case:")
}

func fatal(err error, s ...interface{}) {
    if err != nil {
        log.Fatalln(append(s, err))
    }
}
func fatalW(_ int, err error) func(...interface{}) {
    if err != nil {
        return func(s ...interface{}) {
            log.Fatalln(append(s, err))
        }
    }
    return func(s ...interface{}) {}
}

// construct a slice containing the runes between (l)ow (inclusive) and (h)igh (inclusive)
func runeRange(l, h rune) []rune {
    if h < l {
        panic("invalid range")
    }
    out := make([]rune, h-l+1)
    for i := range out {
        out[i] = l + rune(i)
    }
    return out
}

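runeRange is inclusive at both ends, so for instance (a sketch, not part of the generator):

    runeRange('a', 'd')       // []rune{'a', 'b', 'c', 'd'}
    runeRange(0x03B1, 0x03B3) // []rune{'α', 'β', 'γ'}
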
func getMapping(mask uint) mapping {
    for _, m := range allMappings {
        if m.mask == mask {
            return m
        }
    }
    return mapping{}
}
func collectEncodables(m []mapping) (out []rune) {
    for _, s := range m {
        for _, r := range s.src {
            out = append(out, r)
        }
    }
    return
}
func collectEncoded(m []mapping) (out []rune) {
    for _, s := range m {
        for _, r := range s.dst {
            out = append(out, r)
        }
    }
    return
}

func buildTestString(mappings, testMappings []mapping, fill ...[]rune) (string, string) {
    combinedMappings := append(mappings, testMappings...)
    var (
        rIn  []rune
        rOut []rune
    )
    for _, m := range mappings {
        if len(m.src) == 0 || len(m.src) != len(m.dst) {
            panic("invalid length")
        }
        rIn = append(rIn, m.src...)
        rOut = append(rOut, m.dst...)
    }
    inL := len(rIn)
    testL := inL * 3
    if testL < 30 {
        testL = 30
    }
    rIn = append(rIn, make([]rune, testL-inL)...)
    rOut = append(rOut, make([]rune, testL-inL)...)
    quoteOut := make([]bool, testL)
    set := func(i int, in, out rune, quote bool) {
        rIn[i] = in
        rOut[i] = out
        quoteOut[i] = quote
    }
    for i, r := range rOut[:inL] {
        set(inL+i, r, r, true)
    }

outer:
    for pos := inL * 2; pos < testL; pos++ {
        m := pos % len(fill)
        i := rng.Intn(len(fill[m]))
        r := fill[m][i]
        for _, m := range combinedMappings {
            if pSrc := runePos(r, m.src); pSrc != -1 {
                set(pos, r, m.dst[pSrc], false)
                continue outer
            } else if pDst := runePos(r, m.dst); pDst != -1 {
                set(pos, r, r, true)
                continue outer
            }
        }
        set(pos, r, r, false)
    }

    rng.Shuffle(testL, func(i, j int) {
        rIn[i], rIn[j] = rIn[j], rIn[i]
        rOut[i], rOut[j] = rOut[j], rOut[i]
        quoteOut[i], quoteOut[j] = quoteOut[j], quoteOut[i]
    })

    var bOut strings.Builder
    bOut.Grow(testL)
    for i, r := range rOut {
        if quoteOut[i] {
            bOut.WriteRune(encoder.QuoteRune)
        }
        bOut.WriteRune(r)
    }
    return string(rIn), bOut.String()
}

func buildEdgeTestString(edge int, orig, replace rune, testMappings []mapping, fill ...[]rune) (out []stringPair) {
    testL := 30
    rIn := make([]rune, testL)
    rOut := make([]rune, testL)
    quoteOut := make([]bool, testL)

    set := func(i int, in, out rune, quote bool) {
        rIn[i] = in
        rOut[i] = out
        quoteOut[i] = quote
    }

outer:
    for pos := 0; pos < testL; pos++ {
        m := pos % len(fill)
        i := rng.Intn(len(fill[m]))
        r := fill[m][i]
        for _, m := range testMappings {
            if pSrc := runePos(r, m.src); pSrc != -1 {
                set(pos, r, m.dst[pSrc], false)
                continue outer
            } else if pDst := runePos(r, m.dst); pDst != -1 {
                set(pos, r, r, true)
                continue outer
            }
        }
        set(pos, r, r, false)
    }

    rng.Shuffle(testL, func(i, j int) {
        rIn[i], rIn[j] = rIn[j], rIn[i]
        rOut[i], rOut[j] = rOut[j], rOut[i]
        quoteOut[i], quoteOut[j] = quoteOut[j], quoteOut[i]
    })
    set(10, orig, orig, false)

    out = append(out, stringPair{string(rIn), quotedToString(rOut, quoteOut)})
    for _, i := range []int{0, 1, testL - 2, testL - 1} {
        for _, j := range []int{1, testL - 2, testL - 1} {
            if j < i {
                continue
            }
            rIn := append([]rune{}, rIn...)
            rOut := append([]rune{}, rOut...)
            quoteOut := append([]bool{}, quoteOut...)

            for _, in := range []rune{orig, replace} {
                expect, quote := in, false
                if i == 0 && edge == edgeLeft ||
                    i == testL-1 && edge == edgeRight {
                    expect, quote = replace, in == replace
                }
                rIn[i], rOut[i], quoteOut[i] = in, expect, quote

                if i != j {
                    for _, in := range []rune{orig, replace} {
                        expect, quote = in, false
                        if j == testL-1 && edge == edgeRight {
                            expect, quote = replace, in == replace
                        }
                        rIn[j], rOut[j], quoteOut[j] = in, expect, quote
                    }
                }
                out = append(out, stringPair{string(rIn), quotedToString(rOut, quoteOut)})
            }
        }
    }
    return
}

func runePos(r rune, s []rune) int {
    for i, c := range s {
        if c == r {
            return i
        }
    }
    return -1
}

// quotedToString returns a string for the chars slice where an encoder.QuoteRune is
// inserted before chars[i] when quoted[i] is true.
func quotedToString(chars []rune, quoted []bool) string {
    var out strings.Builder
    out.Grow(len(chars))
    for i, r := range chars {
        if quoted[i] {
            out.WriteRune(encoder.QuoteRune)
        }
        out.WriteRune(r)
    }
    return out.String()
}
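For orientation, each entry the generator writes into encoder_cases_test.go has roughly the shape below; the concrete values are illustrative, not copied from the real generated file:

    { // 3
        mask: EncodeBackSlash,
        in:   "a\\b",
        out:  "a＼b",
    },
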
76
lib/errors/errors.go
Normal file
@@ -0,0 +1,76 @@
package errors

import (
    "errors"
    "fmt"
    "reflect"
)

// New returns an error that formats as the given text.
func New(text string) error {
    return errors.New(text)
}

// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
func Errorf(format string, a ...interface{}) error {
    return fmt.Errorf(format, a...)
}

// WalkFunc is the signature of the Walk callback function. The function gets the
// current error in the chain and should return true if the chain processing
// should be aborted.
type WalkFunc func(error) bool

// Walk invokes the given function for each error in the chain. If the
// provided function returns true or no further cause can be found, the process
// is stopped and no further calls will be made.
//
// The next error in the chain is determined by the following rules:
// - If the current error has a `Cause() error` method (github.com/pkg/errors),
//   the return value of this method is used.
// - If the current error has an `Unwrap() error` method (golang.org/x/xerrors),
//   the return value of this method is used.
// - Common errors in the Go runtime that contain an Err field will use this value.
func Walk(err error, f WalkFunc) {
    for prev := err; err != nil; prev = err {
        if f(err) {
            return
        }

        switch e := err.(type) {
        case causer:
            err = e.Cause()
        case wrapper:
            err = e.Unwrap()
        default:
            // Unpack any struct or *struct with a field of name Err which satisfies
            // the error interface. This includes *url.Error, *net.OpError,
            // *os.SyscallError and many others in the stdlib.
            errType := reflect.TypeOf(err)
            errValue := reflect.ValueOf(err)
            if errValue.IsValid() && errType.Kind() == reflect.Ptr {
                errType = errType.Elem()
                errValue = errValue.Elem()
            }
            if errValue.IsValid() && errType.Kind() == reflect.Struct {
                if errField := errValue.FieldByName("Err"); errField.IsValid() {
                    errFieldValue := errField.Interface()
                    if newErr, ok := errFieldValue.(error); ok {
                        err = newErr
                    }
                }
            }
        }
        if err == prev {
            break
        }
    }
}

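A short usage sketch (not part of the diff; assumes fmt, io and net/url imports): the reflection branch above lets Walk descend through stdlib wrapper types such as *url.Error via their Err field:

    werr := &url.Error{Op: "Get", URL: "http://example.com", Err: io.EOF}
    errors.Walk(werr, func(e error) bool {
        fmt.Printf("%T\n", e) // *url.Error, then io.EOF
        return false          // false means keep walking to the root cause
    })
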
type causer interface {
    Cause() error
}
type wrapper interface {
    Unwrap() error
}
90
lib/errors/errors_test.go
Normal file
@@ -0,0 +1,90 @@
package errors_test

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/ncw/rclone/lib/errors"
)

func TestWalk(t *testing.T) {
    origin := errors.New("origin")

    for _, test := range []struct {
        err   error
        calls int
        last  error
    }{
        {causerError{nil}, 1, causerError{nil}},
        {wrapperError{nil}, 1, wrapperError{nil}},
        {reflectError{nil}, 1, reflectError{nil}},
        {causerError{origin}, 2, origin},
        {wrapperError{origin}, 2, origin},
        {reflectError{origin}, 2, origin},
        {causerError{reflectError{origin}}, 3, origin},
        {wrapperError{causerError{origin}}, 3, origin},
        {reflectError{wrapperError{origin}}, 3, origin},
        {causerError{reflectError{causerError{origin}}}, 4, origin},
        {wrapperError{causerError{wrapperError{origin}}}, 4, origin},
        {reflectError{wrapperError{reflectError{origin}}}, 4, origin},

        {stopError{nil}, 1, stopError{nil}},
        {stopError{causerError{nil}}, 1, stopError{causerError{nil}}},
        {stopError{wrapperError{nil}}, 1, stopError{wrapperError{nil}}},
        {stopError{reflectError{nil}}, 1, stopError{reflectError{nil}}},
        {causerError{stopError{origin}}, 2, stopError{origin}},
        {wrapperError{stopError{origin}}, 2, stopError{origin}},
        {reflectError{stopError{origin}}, 2, stopError{origin}},
        {causerError{reflectError{stopError{nil}}}, 3, stopError{nil}},
        {wrapperError{causerError{stopError{nil}}}, 3, stopError{nil}},
        {reflectError{wrapperError{stopError{nil}}}, 3, stopError{nil}},
    } {
        var last error
        calls := 0
        errors.Walk(test.err, func(err error) bool {
            calls++
            last = err
            _, stop := err.(stopError)
            return stop
        })
        assert.Equal(t, test.calls, calls)
        assert.Equal(t, test.last, last)
    }
}

type causerError struct {
    err error
}
type wrapperError struct {
    err error
}
type reflectError struct {
    Err error
}
type stopError struct {
    err error
}

func (e causerError) Error() string {
    return fmt.Sprintf("causerError(%s)", e.err)
}
func (e causerError) Cause() error {
    return e.err
}
func (e wrapperError) Unwrap() error {
    return e.err
}
func (e wrapperError) Error() string {
    return fmt.Sprintf("wrapperError(%s)", e.err)
}
func (e reflectError) Error() string {
    return fmt.Sprintf("reflectError(%s)", e.Err)
}
func (e stopError) Error() string {
    return fmt.Sprintf("stopError(%s)", e.err)
}
func (e stopError) Cause() error {
    return e.err
}
@@ -2,74 +2,69 @@
package pacer

import (
    "context"
    "math/rand"
    "sync"
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/fserrors"
    "golang.org/x/time/rate"
    "github.com/ncw/rclone/lib/errors"
)

// Pacer state
type Pacer struct {
    mu                 sync.Mutex    // Protecting read/writes
    minSleep           time.Duration // minimum sleep time
    maxSleep           time.Duration // maximum sleep time
    burst              int           // number of calls to send without rate limiting
    limiter            *rate.Limiter // rate limiter for the minsleep
    decayConstant      uint          // decay constant
    attackConstant     uint          // attack constant
    pacer              chan struct{} // To pace the operations
    sleepTime          time.Duration // Time to sleep for each transaction
    retries            int           // Max number of retries
    maxConnections     int           // Maximum number of concurrent connections
    connTokens         chan struct{} // Connection tokens
    calculatePace      func(bool)    // switchable pacing algorithm - call with mu held
    consecutiveRetries int           // number of consecutive retries
// State represents the public Pacer state that will be passed to the
// configured Calculator
type State struct {
    SleepTime          time.Duration // current time to sleep before adding the pacer token back
    ConsecutiveRetries int           // number of consecutive retries, will be 0 when the last invoker call returned false
    LastError          error         // the error returned by the last invoker call or nil
}

// Type is for selecting different pacing algorithms
type Type int
// Calculator is a generic calculation function for a Pacer.
type Calculator interface {
    // Calculate takes the current Pacer state and returns the sleep time after which
    // the next Pacer call will be done.
    Calculate(state State) time.Duration
}

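To illustrate the new interface (a sketch, not code from this diff): any type with a Calculate(State) time.Duration method can drive the pacing, e.g. a trivial fixed-delay calculator that backs off only while retrying:

    // fixedDelay is a hypothetical Calculator: a constant delay,
    // doubled while the previous call is being retried.
    type fixedDelay struct{ d time.Duration }

    func (c fixedDelay) Calculate(state State) time.Duration {
        if state.ConsecutiveRetries > 0 {
            return 2 * c.d
        }
        return c.d
    }
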
const (
    // DefaultPacer is a truncated exponential attack and decay.
    //
    // On retries the sleep time is doubled, on non errors then
    // sleeptime decays according to the decay constant as set
    // with SetDecayConstant.
    //
    // The sleep never goes below that set with SetMinSleep or
    // above that set with SetMaxSleep.
    DefaultPacer = Type(iota)
// Pacer is the primary type of the pacer package. It allows calls to be
// retried with a configurable delay in between.
type Pacer struct {
    pacerOptions
    mu         sync.Mutex    // Protecting read/writes
    pacer      chan struct{} // To pace the operations
    connTokens chan struct{} // Connection tokens
    state      State
}
type pacerOptions struct {
    maxConnections int         // Maximum number of concurrent connections
    retries        int         // Max number of retries
    calculator     Calculator  // switchable pacing algorithm - call with mu held
    invoker        InvokerFunc // wrapper function used to invoke the target function
}

    // AmazonCloudDrivePacer is a specialised pacer for Amazon Drive
    //
    // It implements a truncated exponential backoff strategy with
    // randomization. Normally operations are paced at the
    // interval set with SetMinSleep. On errors the sleep timer
    // is set to 0..2**retries seconds.
    //
    // See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
    AmazonCloudDrivePacer
// InvokerFunc is the signature of the wrapper function used to invoke the
// target function in Pacer.
type InvokerFunc func(try, tries int, f Paced) (bool, error)

    // GoogleDrivePacer is a specialised pacer for Google Drive
    //
    // It implements a truncated exponential backoff strategy with
    // randomization. Normally operations are paced at the
    // interval set with SetMinSleep. On errors the sleep timer
    // is set to (2 ^ n) + random_number_milliseconds seconds
    //
    // See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
    GoogleDrivePacer
// Option can be used in New to configure the Pacer.
type Option func(*pacerOptions)

    // S3Pacer is a specialised pacer for S3
    //
    // It is basically the defaultPacer, but allows the sleep time to go to 0
    // when things are going well.
    S3Pacer
)
// CalculatorOption sets a Calculator for the new Pacer.
func CalculatorOption(c Calculator) Option {
    return func(p *pacerOptions) { p.calculator = c }
}

// RetriesOption sets the number of retries for the new Pacer.
func RetriesOption(retries int) Option {
    return func(p *pacerOptions) { p.retries = retries }
}

// MaxConnectionsOption sets the maximum number of connections for the new Pacer.
func MaxConnectionsOption(maxConnections int) Option {
    return func(p *pacerOptions) { p.maxConnections = maxConnections }
}

// InvokerOption sets an InvokerFunc for the new Pacer.
func InvokerOption(invoker InvokerFunc) Option {
    return func(p *pacerOptions) { p.invoker = invoker }
}

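The options compose at construction time. Mirroring how the tests further down in this diff use them, a caller would write something like:

    p := New(
        RetriesOption(10),
        MaxConnectionsOption(4),
        CalculatorOption(NewDefault(MinSleep(10*time.Millisecond), MaxSleep(2*time.Second))),
    )
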
// Paced is a function which is called by the Call and CallNoRetry
// methods. It should return a boolean, true if it would like to be
@@ -77,19 +72,27 @@ const (
// wrapped in a RetryError.
type Paced func() (bool, error)

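Callers wrap each remote operation in a Paced closure; Call keeps invoking it, sleeping between attempts, until it reports no retry or the retry budget is exhausted. A hedged sketch (doRequest is hypothetical):

    err := p.Call(func() (bool, error) {
        err := doRequest()     // the actual API call
        return err != nil, err // true asks the pacer to retry
    })
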
// New returns a Pacer with sensible defaults
func New() *Pacer {
    p := &Pacer{
        maxSleep:       2 * time.Second,
        decayConstant:  2,
        attackConstant: 1,
        retries:        fs.Config.LowLevelRetries,
        pacer:          make(chan struct{}, 1),
// New returns a Pacer with sensible defaults.
func New(options ...Option) *Pacer {
    opts := pacerOptions{
        maxConnections: 10,
        retries:        3,
    }
    p.sleepTime = p.minSleep
    p.SetPacer(DefaultPacer)
    p.SetMaxConnections(fs.Config.Checkers + fs.Config.Transfers)
    p.SetMinSleep(10 * time.Millisecond)
    for _, o := range options {
        o(&opts)
    }
    p := &Pacer{
        pacerOptions: opts,
        pacer:        make(chan struct{}, 1),
    }
    if p.calculator == nil {
        p.SetCalculator(nil)
    }
    p.state.SleepTime = p.calculator.Calculate(p.state)
    if p.invoker == nil {
        p.invoker = invoke
    }
    p.SetMaxConnections(p.maxConnections)

    // Put the first pacing token in
    p.pacer <- struct{}{}
@@ -97,54 +100,11 @@ func New() *Pacer {
    return p
}

// SetSleep sets the current sleep time
func (p *Pacer) SetSleep(t time.Duration) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.sleepTime = t
    return p
}

// GetSleep gets the current sleep time
func (p *Pacer) GetSleep() time.Duration {
    p.mu.Lock()
    defer p.mu.Unlock()
    return p.sleepTime
}

// SetMinSleep sets the minimum sleep time for the pacer
func (p *Pacer) SetMinSleep(t time.Duration) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.minSleep = t
    p.sleepTime = p.minSleep
    p.limiter = rate.NewLimiter(rate.Every(p.minSleep), p.burst)
    return p
}

// SetBurst sets the burst with no limiting of the pacer
func (p *Pacer) SetBurst(n int) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.burst = n
    p.limiter = rate.NewLimiter(rate.Every(p.minSleep), p.burst)
    return p
}

// SetMaxSleep sets the maximum sleep time for the pacer
func (p *Pacer) SetMaxSleep(t time.Duration) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.maxSleep = t
    p.sleepTime = p.minSleep
    return p
}

// SetMaxConnections sets the maximum number of concurrent connections.
// Setting the value to 0 will allow unlimited number of connections.
// Should not be changed once you have started calling the pacer.
// By default this will be set to fs.Config.Checkers.
func (p *Pacer) SetMaxConnections(n int) *Pacer {
func (p *Pacer) SetMaxConnections(n int) {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.maxConnections = n
@@ -156,61 +116,34 @@ func (p *Pacer) SetMaxConnections(n int) *Pacer {
            p.connTokens <- struct{}{}
        }
    }
    return p
}

// SetDecayConstant sets the decay constant for the pacer
//
// This is the speed the time falls back to the minimum after errors
// have occurred.
//
// bigger for slower decay, exponential. 1 halves the time, 0 goes straight to the minimum
func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.decayConstant = decay
    return p
}

// SetAttackConstant sets the attack constant for the pacer
//
// This is the speed the time grows from the minimum after errors have
// occurred.
//
// bigger for slower attack, 1 doubles the time, 0 goes straight to the maximum
func (p *Pacer) SetAttackConstant(attack uint) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.attackConstant = attack
    return p
}

// SetRetries sets the max number of tries for Call
func (p *Pacer) SetRetries(retries int) *Pacer {
// SetRetries sets the max number of retries for Call
func (p *Pacer) SetRetries(retries int) {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.retries = retries
    return p
}

// SetPacer sets the pacing algorithm
// SetCalculator sets the pacing algorithm. Don't modify the Calculator object
// afterwards, use the ModifyCalculator method when needed.
//
// It will choose the default algorithm if an incorrect value is
// passed in.
func (p *Pacer) SetPacer(t Type) *Pacer {
// It will choose the default algorithm if nil is passed in.
func (p *Pacer) SetCalculator(c Calculator) {
    p.mu.Lock()
    defer p.mu.Unlock()
    switch t {
    case AmazonCloudDrivePacer:
        p.calculatePace = p.acdPacer
    case GoogleDrivePacer:
        p.calculatePace = p.drivePacer
    case S3Pacer:
        p.calculatePace = p.s3Pacer
    default:
        p.calculatePace = p.defaultPacer
    if c == nil {
        c = NewDefault()
    }
    return p
    p.calculator = c
}

// ModifyCalculator calls the given function with the currently configured
// Calculator and the Pacer lock held.
func (p *Pacer) ModifyCalculator(f func(Calculator)) {
    p.mu.Lock()
    f(p.calculator)
    p.mu.Unlock()
}

// Start a call to the API
@@ -230,170 +163,29 @@ func (p *Pacer) beginCall() {

    p.mu.Lock()
    // Restart the timer
    go func(sleepTime, minSleep time.Duration) {
        // fs.Debugf(f, "New sleep for %v at %v", t, time.Now())
        // Sleep the minimum time with the rate limiter
        if minSleep > 0 && sleepTime >= minSleep {
            _ = p.limiter.Wait(context.Background())
            sleepTime -= minSleep
        }
        // Then sleep the remaining time
        if sleepTime > 0 {
            time.Sleep(sleepTime)
        }
    go func(t time.Duration) {
        time.Sleep(t)
        p.pacer <- struct{}{}
    }(p.sleepTime, p.minSleep)
    }(p.state.SleepTime)
    p.mu.Unlock()
}

// defaultPacer implements an exponential up
// and down pacing algorithm
//
// See the description for DefaultPacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) defaultPacer(retry bool) {
    oldSleepTime := p.sleepTime
    if retry {
        if p.attackConstant == 0 {
            p.sleepTime = p.maxSleep
        } else {
            p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
        }
        if p.sleepTime > p.maxSleep {
            p.sleepTime = p.maxSleep
        }
        if p.sleepTime != oldSleepTime {
            fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
        }
    } else {
        p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
        if p.sleepTime < p.minSleep {
            p.sleepTime = p.minSleep
        }
        if p.sleepTime != oldSleepTime {
            fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
        }
    }
}

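To make the shift arithmetic concrete: on a retry with attackConstant 1 the sleep becomes (s<<1)/((1<<1)-1) = 2s, so 1ms grows to 2ms; on success with decayConstant d it becomes (s<<d - s)>>d, i.e. s/2 for d=1 (8ms decays to 4ms) and 3s/4 for d=2 (1ms decays to 750µs), always clamped between minSleep and maxSleep. The TestDecay and TestAttack tables later in this diff pin down exactly these values.
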
// acdPacer implements a truncated exponential backoff
// strategy with randomization for Amazon Drive
//
// See the description for AmazonCloudDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) acdPacer(retry bool) {
    consecutiveRetries := p.consecutiveRetries
    if consecutiveRetries == 0 {
        if p.sleepTime != p.minSleep {
            p.sleepTime = p.minSleep
            fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
        }
    } else {
        if consecutiveRetries > 9 {
            consecutiveRetries = 9
        }
        // consecutiveRetries starts at 1 so
        // maxSleep is 2**(consecutiveRetries-1) seconds
        maxSleep := time.Second << uint(consecutiveRetries-1)
        // actual sleep is random from 0..maxSleep
        p.sleepTime = time.Duration(rand.Int63n(int64(maxSleep)))
        if p.sleepTime < p.minSleep {
            p.sleepTime = p.minSleep
        }
        fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
    }
}

// drivePacer implements a truncated exponential backoff strategy with
// randomization for Google Drive
//
// See the description for GoogleDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) drivePacer(retry bool) {
    consecutiveRetries := p.consecutiveRetries
    if consecutiveRetries == 0 {
        if p.sleepTime != p.minSleep {
            p.sleepTime = p.minSleep
            fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
        }
    } else {
        if consecutiveRetries > 5 {
            consecutiveRetries = 5
        }
        // consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
        // maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
        p.sleepTime = time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Second)))
        fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
    }
}

// s3Pacer implements a pacer compatible with our expectations of S3, where it tries to not
// delay at all between successful calls, but backs off in the default fashion in response
// to any errors.
// The assumption is that errors should be exceedingly rare (S3 seems to have largely solved
// the sort of scalability questions rclone is likely to run into), and in the happy case
// it can handle calls with no delays between them.
//
// Basically defaultPacer, but with some handling of sleepTime going to/from 0ms
// Ignores minSleep entirely
//
// Call with p.mu held
func (p *Pacer) s3Pacer(retry bool) {
    oldSleepTime := p.sleepTime
    if retry {
        if p.attackConstant == 0 {
            p.sleepTime = p.maxSleep
        } else {
            if p.sleepTime == 0 {
                p.sleepTime = p.minSleep
            } else {
                p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
            }
        }
        if p.sleepTime > p.maxSleep {
            p.sleepTime = p.maxSleep
        }
        if p.sleepTime != oldSleepTime {
            fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
        }
    } else {
        p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
        if p.sleepTime < p.minSleep {
            p.sleepTime = 0
        }
        if p.sleepTime != oldSleepTime {
            fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
        }
    }
}

// endCall implements the pacing algorithm
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
func (p *Pacer) endCall(retry bool) {
func (p *Pacer) endCall(retry bool, err error) {
    if p.maxConnections > 0 {
        p.connTokens <- struct{}{}
    }
    p.mu.Lock()
    if retry {
        p.consecutiveRetries++
        p.state.ConsecutiveRetries++
    } else {
        p.consecutiveRetries = 0
        p.state.ConsecutiveRetries = 0
    }
    p.calculatePace(retry)
    p.state.LastError = err
    p.state.SleepTime = p.calculator.Calculate(p.state)
    p.mu.Unlock()
}

@@ -402,15 +194,11 @@ func (p *Pacer) call(fn Paced, retries int) (err error) {
    var retry bool
    for i := 1; i <= retries; i++ {
        p.beginCall()
        retry, err = fn()
        p.endCall(retry)
        retry, err = p.invoker(i, retries, fn)
        p.endCall(retry, err)
        if !retry {
            break
        }
        fs.Debugf("pacer", "low level retry %d/%d (error %v)", i, retries, err)
    }
    if retry {
        err = fserrors.RetryError(err)
    }
    return err
}
@@ -436,3 +224,41 @@ func (p *Pacer) Call(fn Paced) (err error) {
func (p *Pacer) CallNoRetry(fn Paced) error {
    return p.call(fn, 1)
}

func invoke(try, tries int, f Paced) (bool, error) {
    return f()
}

type retryAfterError struct {
    error
    retryAfter time.Duration
}

func (r *retryAfterError) Error() string {
    return r.error.Error()
}

func (r *retryAfterError) Cause() error {
    return r.error
}

// RetryAfterError returns a wrapped error that can be used by Calculator implementations
func RetryAfterError(err error, retryAfter time.Duration) error {
    return &retryAfterError{
        error:      err,
        retryAfter: retryAfter,
    }
}

// IsRetryAfter returns true if the error or any of its causes is an error
// returned by RetryAfterError. It also returns the associated Duration if possible.
func IsRetryAfter(err error) (retryAfter time.Duration, isRetryAfter bool) {
    errors.Walk(err, func(err error) bool {
        if r, ok := err.(*retryAfterError); ok {
            retryAfter, isRetryAfter = r.retryAfter, true
            return true
        }
        return false
    })
    return
}

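A sketch of how a backend could surface a server-supplied retry hint through this machinery (doRequest and the 429 handling are hypothetical; RetryAfterError and IsRetryAfter are the helpers above):

    err := p.Call(func() (bool, error) {
        resp, err := doRequest()
        if err != nil && resp != nil && resp.StatusCode == 429 {
            // carry the server's hint through to the Calculator
            return true, RetryAfterError(err, 5*time.Second)
        }
        return err != nil, err
    })

    // ...and inside a custom Calculator:
    // if d, ok := IsRetryAfter(state.LastError); ok { return d }
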
@@ -1,181 +1,85 @@
package pacer

import (
    "fmt"
    "sync"
    "testing"
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/fserrors"
    "github.com/pkg/errors"
    "github.com/stretchr/testify/assert"
)

func TestNew(t *testing.T) {
    const expectedRetries = 7
    fs.Config.LowLevelRetries = expectedRetries
    p := New()
    if p.minSleep != 10*time.Millisecond {
        t.Errorf("minSleep")
    }
    if p.maxSleep != 2*time.Second {
        t.Errorf("maxSleep")
    }
    if p.sleepTime != p.minSleep {
        t.Errorf("sleepTime")
    }
    if p.retries != expectedRetries {
        t.Errorf("retries want %v got %v", expectedRetries, p.retries)
    }
    if p.decayConstant != 2 {
        t.Errorf("decayConstant")
    }
    if p.attackConstant != 1 {
        t.Errorf("attackConstant")
    }
    if cap(p.pacer) != 1 {
        t.Errorf("pacer 1")
    }
    if len(p.pacer) != 1 {
        t.Errorf("pacer 2")
    }
    if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.defaultPacer) {
        t.Errorf("calculatePace")
    }
    if p.maxConnections != fs.Config.Checkers+fs.Config.Transfers {
        t.Errorf("maxConnections")
    }
    if cap(p.connTokens) != fs.Config.Checkers+fs.Config.Transfers {
        t.Errorf("connTokens")
    }
    if p.consecutiveRetries != 0 {
        t.Errorf("consecutiveRetries")
    }
}

func TestSetSleep(t *testing.T) {
    p := New().SetSleep(2 * time.Millisecond)
    if p.sleepTime != 2*time.Millisecond {
        t.Errorf("didn't set")
    }
}

func TestGetSleep(t *testing.T) {
    p := New().SetSleep(2 * time.Millisecond)
    if p.GetSleep() != 2*time.Millisecond {
        t.Errorf("didn't get")
    }
}

func TestSetMinSleep(t *testing.T) {
    p := New().SetMinSleep(1 * time.Millisecond)
    if p.minSleep != 1*time.Millisecond {
        t.Errorf("didn't set")
    }
}

func TestSetMaxSleep(t *testing.T) {
    p := New().SetMaxSleep(100 * time.Second)
    if p.maxSleep != 100*time.Second {
        t.Errorf("didn't set")
    const expectedConnections = 9
    p := New(RetriesOption(expectedRetries), MaxConnectionsOption(expectedConnections))
    if d, ok := p.calculator.(*Default); ok {
        assert.Equal(t, 10*time.Millisecond, d.minSleep)
        assert.Equal(t, 2*time.Second, d.maxSleep)
        assert.Equal(t, d.minSleep, p.state.SleepTime)
        assert.Equal(t, uint(2), d.decayConstant)
        assert.Equal(t, uint(1), d.attackConstant)
    } else {
        t.Errorf("calculator")
    }
    assert.Equal(t, expectedRetries, p.retries)
    assert.Equal(t, 1, cap(p.pacer))
    assert.Equal(t, 1, len(p.pacer))
    assert.Equal(t, expectedConnections, p.maxConnections)
    assert.Equal(t, expectedConnections, cap(p.connTokens))
    assert.Equal(t, 0, p.state.ConsecutiveRetries)
}

func TestMaxConnections(t *testing.T) {
    p := New().SetMaxConnections(20)
    if p.maxConnections != 20 {
        t.Errorf("maxConnections")
    }
    if cap(p.connTokens) != 20 {
        t.Errorf("connTokens")
    }
    p := New()
    p.SetMaxConnections(20)
    assert.Equal(t, 20, p.maxConnections)
    assert.Equal(t, 20, cap(p.connTokens))
    p.SetMaxConnections(0)
    if p.maxConnections != 0 {
        t.Errorf("maxConnections is not 0")
    }
    if p.connTokens != nil {
        t.Errorf("connTokens is not nil")
    }
}

func TestSetDecayConstant(t *testing.T) {
    p := New().SetDecayConstant(17)
    if p.decayConstant != 17 {
        t.Errorf("didn't set")
    }
    assert.Equal(t, 0, p.maxConnections)
    assert.Nil(t, p.connTokens)
}

func TestDecay(t *testing.T) {
    p := New().SetMinSleep(time.Microsecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second)
    c := NewDefault(MinSleep(1*time.Microsecond), MaxSleep(1*time.Second))
    for _, test := range []struct {
        in             time.Duration
        in             State
        attackConstant uint
        want           time.Duration
    }{
        {8 * time.Millisecond, 1, 4 * time.Millisecond},
        {1 * time.Millisecond, 0, time.Microsecond},
        {1 * time.Millisecond, 2, (3 * time.Millisecond) / 4},
        {1 * time.Millisecond, 3, (7 * time.Millisecond) / 8},
        {State{SleepTime: 8 * time.Millisecond}, 1, 4 * time.Millisecond},
        {State{SleepTime: 1 * time.Millisecond}, 0, 1 * time.Microsecond},
        {State{SleepTime: 1 * time.Millisecond}, 2, (3 * time.Millisecond) / 4},
        {State{SleepTime: 1 * time.Millisecond}, 3, (7 * time.Millisecond) / 8},
    } {
        p.sleepTime = test.in
        p.SetDecayConstant(test.attackConstant)
        p.defaultPacer(false)
        got := p.sleepTime
        if got != test.want {
            t.Errorf("bad sleep want %v got %v", test.want, got)
        }
    }
}

func TestSetAttackConstant(t *testing.T) {
    p := New().SetAttackConstant(19)
    if p.attackConstant != 19 {
        t.Errorf("didn't set")
        c.decayConstant = test.attackConstant
        got := c.Calculate(test.in)
        assert.Equal(t, test.want, got, "test: %+v", test)
    }
}

func TestAttack(t *testing.T) {
    p := New().SetMinSleep(time.Microsecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second)
    c := NewDefault(MinSleep(1*time.Microsecond), MaxSleep(1*time.Second))
    for _, test := range []struct {
        in             time.Duration
        in             State
        attackConstant uint
        want           time.Duration
    }{
        {1 * time.Millisecond, 1, 2 * time.Millisecond},
        {1 * time.Millisecond, 0, time.Second},
        {1 * time.Millisecond, 2, (4 * time.Millisecond) / 3},
        {1 * time.Millisecond, 3, (8 * time.Millisecond) / 7},
        {State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 1, 2 * time.Millisecond},
        {State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 0, 1 * time.Second},
        {State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 2, (4 * time.Millisecond) / 3},
        {State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 3, (8 * time.Millisecond) / 7},
    } {
        p.sleepTime = test.in
        p.SetAttackConstant(test.attackConstant)
        p.defaultPacer(true)
        got := p.sleepTime
        if got != test.want {
            t.Errorf("bad sleep want %v got %v", test.want, got)
        }
        c.attackConstant = test.attackConstant
        got := c.Calculate(test.in)
        assert.Equal(t, test.want, got, "test: %+v", test)
    }

}

func TestSetRetries(t *testing.T) {
    p := New().SetRetries(18)
    if p.retries != 18 {
        t.Errorf("didn't set")
    }
}

func TestSetPacer(t *testing.T) {
    p := New().SetPacer(AmazonCloudDrivePacer)
    if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.acdPacer) {
        t.Errorf("calculatePace is not acdPacer")
    }
    p.SetPacer(GoogleDrivePacer)
    if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.drivePacer) {
        t.Errorf("calculatePace is not drivePacer")
    }
    p.SetPacer(DefaultPacer)
    if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.defaultPacer) {
        t.Errorf("calculatePace is not defaultPacer")
    }
    p := New()
    p.SetRetries(18)
    assert.Equal(t, 18, p.retries)
}

// emptyTokens empties the pacer of all its tokens
@@ -200,7 +104,7 @@ func waitForPace(p *Pacer, duration time.Duration) (when time.Time) {
}

func TestBeginCall(t *testing.T) {
    p := New().SetMaxConnections(10).SetMinSleep(1 * time.Millisecond)
    p := New(MaxConnectionsOption(10), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond))))
    emptyTokens(p)
    go p.beginCall()
    if !waitForPace(p, 10*time.Millisecond).IsZero() {
@@ -223,7 +127,7 @@ func TestBeginCall(t *testing.T) {
}

func TestBeginCallZeroConnections(t *testing.T) {
    p := New().SetMaxConnections(0).SetMinSleep(1 * time.Millisecond)
    p := New(MaxConnectionsOption(0), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond))))
    emptyTokens(p)
    go p.beginCall()
    if !waitForPace(p, 10*time.Millisecond).IsZero() {
@@ -241,155 +145,143 @@ func TestBeginCallZeroConnections(t *testing.T) {
}

func TestDefaultPacer(t *testing.T) {
    p := New().SetMinSleep(time.Millisecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second).SetDecayConstant(2)
    c := NewDefault(MinSleep(1*time.Millisecond), MaxSleep(1*time.Second), DecayConstant(2))
    for _, test := range []struct {
        in    time.Duration
        retry bool
        state State
        want  time.Duration
    }{
        {time.Millisecond, true, 2 * time.Millisecond},
        {time.Second, true, time.Second},
        {(3 * time.Second) / 4, true, time.Second},
        {time.Second, false, 750 * time.Millisecond},
        {1000 * time.Microsecond, false, time.Millisecond},
        {1200 * time.Microsecond, false, time.Millisecond},
        {State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 2 * time.Millisecond},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 1 * time.Second},
        {State{SleepTime: (3 * time.Second) / 4, ConsecutiveRetries: 1}, 1 * time.Second},
        {State{SleepTime: 1 * time.Second}, 750 * time.Millisecond},
        {State{SleepTime: 1000 * time.Microsecond}, 1 * time.Millisecond},
        {State{SleepTime: 1200 * time.Microsecond}, 1 * time.Millisecond},
    } {
        p.sleepTime = test.in
        p.defaultPacer(test.retry)
        got := p.sleepTime
        if got != test.want {
            t.Errorf("bad sleep want %v got %v", test.want, got)
        }
        got := c.Calculate(test.state)
        assert.Equal(t, test.want, got, "test: %+v", test)
    }

}

func TestAmazonCloudDrivePacer(t *testing.T) {
    p := New().SetMinSleep(time.Millisecond).SetPacer(AmazonCloudDrivePacer).SetMaxSleep(time.Second).SetDecayConstant(2)
    c := NewAmazonCloudDrive(MinSleep(1 * time.Millisecond))
    // Do lots of times because of the random number!
    for _, test := range []struct {
        in                 time.Duration
        consecutiveRetries int
        retry              bool
        want               time.Duration
        state              State
        want               time.Duration
    }{
        {time.Millisecond, 0, true, time.Millisecond},
        {10 * time.Millisecond, 0, true, time.Millisecond},
        {1 * time.Second, 1, true, 500 * time.Millisecond},
        {1 * time.Second, 2, true, 1 * time.Second},
        {1 * time.Second, 3, true, 2 * time.Second},
        {1 * time.Second, 4, true, 4 * time.Second},
        {1 * time.Second, 5, true, 8 * time.Second},
        {1 * time.Second, 6, true, 16 * time.Second},
        {1 * time.Second, 7, true, 32 * time.Second},
        {1 * time.Second, 8, true, 64 * time.Second},
        {1 * time.Second, 9, true, 128 * time.Second},
        {1 * time.Second, 10, true, 128 * time.Second},
        {1 * time.Second, 11, true, 128 * time.Second},
        {State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 0}, 1 * time.Millisecond},
        {State{SleepTime: 10 * time.Millisecond, ConsecutiveRetries: 0}, 1 * time.Millisecond},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 500 * time.Millisecond},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 2}, 1 * time.Second},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 3}, 2 * time.Second},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 4}, 4 * time.Second},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 5}, 8 * time.Second},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 6}, 16 * time.Second},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 7}, 32 * time.Second},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 8}, 64 * time.Second},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 9}, 128 * time.Second},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 10}, 128 * time.Second},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 11}, 128 * time.Second},
    } {
        const n = 1000
        var sum time.Duration
        // measure average time over n cycles
        for i := 0; i < n; i++ {
            p.sleepTime = test.in
            p.consecutiveRetries = test.consecutiveRetries
            p.acdPacer(test.retry)
            sum += p.sleepTime
            sum += c.Calculate(test.state)
        }
        got := sum / n
        //t.Logf("%+v: got = %v", test, got)
        if got < (test.want*9)/10 || got > (test.want*11)/10 {
            t.Fatalf("%+v: bad sleep want %v+/-10%% got %v", test, test.want, got)
        }
        assert.False(t, got < (test.want*9)/10 || got > (test.want*11)/10, "test: %+v", test)
    }
}

func TestGoogleDrivePacer(t *testing.T) {
    p := New().SetMinSleep(time.Millisecond).SetPacer(GoogleDrivePacer).SetMaxSleep(time.Second).SetDecayConstant(2)
    // Do lots of times because of the random number!
    for _, test := range []struct {
        in                 time.Duration
        consecutiveRetries int
        retry              bool
        want               time.Duration
        state              State
        want               time.Duration
    }{
        {time.Millisecond, 0, true, time.Millisecond},
        {10 * time.Millisecond, 0, true, time.Millisecond},
        {1 * time.Second, 1, true, 1*time.Second + 500*time.Millisecond},
        {1 * time.Second, 2, true, 2*time.Second + 500*time.Millisecond},
        {1 * time.Second, 3, true, 4*time.Second + 500*time.Millisecond},
        {1 * time.Second, 4, true, 8*time.Second + 500*time.Millisecond},
        {1 * time.Second, 5, true, 16*time.Second + 500*time.Millisecond},
        {1 * time.Second, 6, true, 16*time.Second + 500*time.Millisecond},
        {1 * time.Second, 7, true, 16*time.Second + 500*time.Millisecond},
        {State{SleepTime: 1 * time.Millisecond}, 0},
        {State{SleepTime: 10 * time.Millisecond}, 0},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 1*time.Second + 500*time.Millisecond},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 2}, 2*time.Second + 500*time.Millisecond},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 3}, 4*time.Second + 500*time.Millisecond},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 4}, 8*time.Second + 500*time.Millisecond},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 5}, 16*time.Second + 500*time.Millisecond},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 6}, 16*time.Second + 500*time.Millisecond},
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 7}, 16*time.Second + 500*time.Millisecond},
    } {
        const n = 1000
        var sum time.Duration
        // measure average time over n cycles
        for i := 0; i < n; i++ {
            p.sleepTime = test.in
            p.consecutiveRetries = test.consecutiveRetries
            p.drivePacer(test.retry)
            sum += p.sleepTime
            c := NewGoogleDrive(MinSleep(1 * time.Millisecond))
            sum += c.Calculate(test.state)
        }
        got := sum / n
        //t.Logf("%+v: got = %v", test, got)
        if got < (test.want*9)/10 || got > (test.want*11)/10 {
            t.Fatalf("%+v: bad sleep want %v+/-10%% got %v", test, test.want, got)
        assert.False(t, got < (test.want*9)/10 || got > (test.want*11)/10, "test: %+v, got: %v", test, got)
    }

    const minSleep = 2 * time.Millisecond
    for _, test := range []struct {
        calls int
        want  int
    }{
        {1, 0},
        {9, 0},
        {10, 0},
        {11, 1},
        {12, 2},
    } {
        c := NewGoogleDrive(MinSleep(minSleep), Burst(10))
        count := 0
        for i := 0; i < test.calls; i++ {
            sleep := c.Calculate(State{})
            if sleep != 0 {
                count++
            }
        }
        assert.Equalf(t, test.want, count, "test: %+v, got: %v", test, count)
    }
}

func TestS3Pacer(t *testing.T) {
    p := New().SetMinSleep(10 * time.Millisecond).SetPacer(S3Pacer).SetMaxSleep(time.Second).SetDecayConstant(2)
    c := NewS3(MinSleep(10*time.Millisecond), MaxSleep(1*time.Second), DecayConstant(2))
    for _, test := range []struct {
        in    time.Duration
        retry bool
        state State
        want  time.Duration
    }{
        {0, true, 10 * time.Millisecond}, // Things were going ok, we failed once, back off to minSleep
        {10 * time.Millisecond, true, 20 * time.Millisecond}, // Another fail, double the backoff
        {10 * time.Millisecond, false, 0}, // Things start going ok when we're at minSleep; should result in no sleep
        {12 * time.Millisecond, false, 0}, // *near* minSleep and going ok, decay would take below minSleep, should go to 0
        {0, false, 0}, // Things have been going ok; not retrying should keep sleep at 0
        {time.Second, true, time.Second}, // Check maxSleep is enforced
        {(3 * time.Second) / 4, true, time.Second}, // Check attack heading to maxSleep doesn't exceed maxSleep
        {time.Second, false, 750 * time.Millisecond}, // Check decay from maxSleep
        {48 * time.Millisecond, false, 36 * time.Millisecond}, // Check simple decay above minSleep
        {State{SleepTime: 0, ConsecutiveRetries: 1}, 10 * time.Millisecond}, // Things were going ok, we failed once, back off to minSleep
        {State{SleepTime: 10 * time.Millisecond, ConsecutiveRetries: 1}, 20 * time.Millisecond}, // Another fail, double the backoff
        {State{SleepTime: 10 * time.Millisecond}, 0}, // Things start going ok when we're at minSleep; should result in no sleep
        {State{SleepTime: 12 * time.Millisecond}, 0}, // *near* minSleep and going ok, decay would take below minSleep, should go to 0
        {State{SleepTime: 0}, 0}, // Things have been going ok; not retrying should keep sleep at 0
        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 1 * time.Second}, // Check maxSleep is enforced
        {State{SleepTime: (3 * time.Second) / 4, ConsecutiveRetries: 1}, 1 * time.Second}, // Check attack heading to maxSleep doesn't exceed maxSleep
        {State{SleepTime: 1 * time.Second}, 750 * time.Millisecond}, // Check decay from maxSleep
        {State{SleepTime: 48 * time.Millisecond}, 36 * time.Millisecond}, // Check simple decay above minSleep
    } {
        p.sleepTime = test.in
        p.s3Pacer(test.retry)
        got := p.sleepTime
        if got != test.want {
            t.Errorf("bad sleep for %v with retry %v: want %v got %v", test.in, test.retry, test.want, got)
        }
        got := c.Calculate(test.state)
        assert.Equal(t, test.want, got, "test: %+v", test)
    }
}

func TestEndCall(t *testing.T) {
    p := New().SetMaxConnections(5)
    p := New(MaxConnectionsOption(5))
    emptyTokens(p)
    p.consecutiveRetries = 1
    p.endCall(true)
    if len(p.connTokens) != 1 {
        t.Errorf("Expecting 1 token")
    }
    if p.consecutiveRetries != 2 {
        t.Errorf("Bad consecutive retries")
    }
    p.state.ConsecutiveRetries = 1
    p.endCall(true, nil)
    assert.Equal(t, 1, len(p.connTokens))
    assert.Equal(t, 2, p.state.ConsecutiveRetries)
}

func TestEndCallZeroConnections(t *testing.T) {
    p := New().SetMaxConnections(0)
    p := New(MaxConnectionsOption(0))
    emptyTokens(p)
    p.consecutiveRetries = 1
    p.endCall(false)
    if len(p.connTokens) != 0 {
        t.Errorf("Expecting 0 token")
    }
    if p.consecutiveRetries != 0 {
        t.Errorf("Bad consecutive retries")
    }
    p.state.ConsecutiveRetries = 1
    p.endCall(false, nil)
    assert.Equal(t, 0, len(p.connTokens))
    assert.Equal(t, 0, p.state.ConsecutiveRetries)
}

var errFoo = errors.New("foo")
|
||||
@@ -397,67 +289,79 @@ var errFoo = errors.New("foo")
type dummyPaced struct {
	retry  bool
	called int
	wait   *sync.Cond
}

func (dp *dummyPaced) fn() (bool, error) {
	dp.called++
	if dp.wait != nil {
		dp.wait.L.Lock()
		dp.called++
		dp.wait.Wait()
		dp.wait.L.Unlock()
	} else {
		dp.called++
	}
	return dp.retry, errFoo
}

func Test_callNoRetry(t *testing.T) {
	p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond)
func TestCallFixed(t *testing.T) {
	p := New(CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))

	dp := &dummyPaced{retry: false}
	err := p.call(dp.fn, 10)
	if dp.called != 1 {
		t.Errorf("called want %d got %d", 1, dp.called)
	}
	if err != errFoo {
		t.Errorf("err want %v got %v", errFoo, err)
	}
	assert.Equal(t, 1, dp.called)
	assert.Equal(t, errFoo, err)
}

func Test_callRetry(t *testing.T) {
	p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond)
	p := New(CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))

	dp := &dummyPaced{retry: true}
	err := p.call(dp.fn, 10)
	if dp.called != 10 {
		t.Errorf("called want %d got %d", 10, dp.called)
	}
	if err == errFoo {
		t.Errorf("err didn't want %v got %v", errFoo, err)
	}
	_, ok := err.(fserrors.Retrier)
	if !ok {
		t.Errorf("didn't return a retry error")
	}
	assert.Equal(t, 10, dp.called)
	assert.Equal(t, errFoo, err)
}

func TestCall(t *testing.T) {
	p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond).SetRetries(20)
	p := New(RetriesOption(20), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))

	dp := &dummyPaced{retry: true}
	err := p.Call(dp.fn)
	if dp.called != 20 {
		t.Errorf("called want %d got %d", 20, dp.called)
	}
	_, ok := err.(fserrors.Retrier)
	if !ok {
		t.Errorf("didn't return a retry error")
	}
	assert.Equal(t, 20, dp.called)
	assert.Equal(t, errFoo, err)
}

func TestCallNoRetry(t *testing.T) {
	p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond).SetRetries(20)
func TestCallParallel(t *testing.T) {
	p := New(MaxConnectionsOption(3), RetriesOption(1), CalculatorOption(NewDefault(MinSleep(100*time.Microsecond), MaxSleep(1*time.Millisecond))))

	dp := &dummyPaced{retry: true}
	err := p.CallNoRetry(dp.fn)
	if dp.called != 1 {
		t.Errorf("called want %d got %d", 1, dp.called)
	wait := sync.NewCond(&sync.Mutex{})
	funcs := make([]*dummyPaced, 5)
	for i := range funcs {
		dp := &dummyPaced{wait: wait}
		funcs[i] = dp
		go func() {
			assert.Equal(t, errFoo, p.CallNoRetry(dp.fn))
		}()
	}
	_, ok := err.(fserrors.Retrier)
	if !ok {
		t.Errorf("didn't return a retry error")
	time.Sleep(10 * time.Millisecond)
	called := 0
	wait.L.Lock()
	for _, dp := range funcs {
		called += dp.called
	}
	wait.L.Unlock()

	assert.Equal(t, 3, called)
	wait.Broadcast()
	time.Sleep(20 * time.Millisecond)

	called = 0
	wait.L.Lock()
	for _, dp := range funcs {
		called += dp.called
	}
	wait.L.Unlock()

	assert.Equal(t, 5, called)
	wait.Broadcast()
}
326
lib/pacer/pacers.go
Normal file
@@ -0,0 +1,326 @@
package pacer

import (
	"math/rand"
	"time"

	"golang.org/x/time/rate"
)

type (
	// MinSleep configures the minimum sleep time of a Calculator
	MinSleep time.Duration
	// MaxSleep configures the maximum sleep time of a Calculator
	MaxSleep time.Duration
	// DecayConstant configures the decay constant of a Calculator
	DecayConstant uint
	// AttackConstant configures the attack constant of a Calculator
	AttackConstant uint
	// Burst configures the number of API calls to allow without sleeping
	Burst int
)

// Default is a truncated exponential attack and decay.
//
// On retries the sleep time is doubled; on non-errors the sleep time decays
// according to the decay constant as set with SetDecayConstant.
//
// The sleep never goes below that set with SetMinSleep or above that set
// with SetMaxSleep.
type Default struct {
	minSleep       time.Duration // minimum sleep time
	maxSleep       time.Duration // maximum sleep time
	decayConstant  uint          // decay constant
	attackConstant uint          // attack constant
}

// DefaultOption is the interface implemented by all options for the Default Calculator
type DefaultOption interface {
	ApplyDefault(*Default)
}

// NewDefault creates a Calculator used by Pacer as the default.
func NewDefault(opts ...DefaultOption) *Default {
	c := &Default{
		minSleep:       10 * time.Millisecond,
		maxSleep:       2 * time.Second,
		decayConstant:  2,
		attackConstant: 1,
	}
	c.Update(opts...)
	return c
}

// Update applies the Calculator options.
func (c *Default) Update(opts ...DefaultOption) {
	for _, opt := range opts {
		opt.ApplyDefault(c)
	}
}

// ApplyDefault updates the value on the Calculator
func (o MinSleep) ApplyDefault(c *Default) {
	c.minSleep = time.Duration(o)
}

// ApplyDefault updates the value on the Calculator
func (o MaxSleep) ApplyDefault(c *Default) {
	c.maxSleep = time.Duration(o)
}

// ApplyDefault updates the value on the Calculator
func (o DecayConstant) ApplyDefault(c *Default) {
	c.decayConstant = uint(o)
}

// ApplyDefault updates the value on the Calculator
func (o AttackConstant) ApplyDefault(c *Default) {
	c.attackConstant = uint(o)
}

// Calculate takes the current Pacer state and returns the wait time until the next try.
func (c *Default) Calculate(state State) time.Duration {
	if t, ok := IsRetryAfter(state.LastError); ok {
		if t < c.minSleep {
			return c.minSleep
		}
		return t
	}

	if state.ConsecutiveRetries > 0 {
		sleepTime := c.maxSleep
		if c.attackConstant != 0 {
			sleepTime = (state.SleepTime << c.attackConstant) / ((1 << c.attackConstant) - 1)
		}
		if sleepTime > c.maxSleep {
			sleepTime = c.maxSleep
		}
		return sleepTime
	}
	sleepTime := (state.SleepTime<<c.decayConstant - state.SleepTime) >> c.decayConstant
	if sleepTime < c.minSleep {
		sleepTime = c.minSleep
	}
	return sleepTime
}
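
A minimal sketch of the attack/decay above (not part of the diff; it uses only this package's Default, State and option types, and assumes the caller feeds each result back into State.SleepTime, as Pacer itself does):

	c := NewDefault(MinSleep(10*time.Millisecond), MaxSleep(2*time.Second))
	s := State{SleepTime: 10 * time.Millisecond}
	for i := 0; i < 3; i++ { // attackConstant=1 doubles on each retry: 20ms, 40ms, 80ms
		s.ConsecutiveRetries++
		s.SleepTime = c.Calculate(s)
	}
	s.ConsecutiveRetries = 0
	s.SleepTime = c.Calculate(s) // decayConstant=2 removes a quarter: 80ms -> 60ms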

// AmazonCloudDrive is a specialized pacer for Amazon Drive
//
// It implements a truncated exponential backoff strategy with randomization.
// Normally operations are paced at the interval set with SetMinSleep. On errors
// the sleep timer is set to 0..2**retries seconds.
//
// See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
type AmazonCloudDrive struct {
	minSleep time.Duration // minimum sleep time
}

// AmazonCloudDriveOption is the interface implemented by all options for the AmazonCloudDrive Calculator
type AmazonCloudDriveOption interface {
	ApplyAmazonCloudDrive(*AmazonCloudDrive)
}

// NewAmazonCloudDrive returns a new AmazonCloudDrive Calculator with default values
func NewAmazonCloudDrive(opts ...AmazonCloudDriveOption) *AmazonCloudDrive {
	c := &AmazonCloudDrive{
		minSleep: 10 * time.Millisecond,
	}
	c.Update(opts...)
	return c
}

// Update applies the Calculator options.
func (c *AmazonCloudDrive) Update(opts ...AmazonCloudDriveOption) {
	for _, opt := range opts {
		opt.ApplyAmazonCloudDrive(c)
	}
}

// ApplyAmazonCloudDrive updates the value on the Calculator
func (o MinSleep) ApplyAmazonCloudDrive(c *AmazonCloudDrive) {
	c.minSleep = time.Duration(o)
}

// Calculate takes the current Pacer state and returns the wait time until the next try.
func (c *AmazonCloudDrive) Calculate(state State) time.Duration {
	if t, ok := IsRetryAfter(state.LastError); ok {
		if t < c.minSleep {
			return c.minSleep
		}
		return t
	}

	consecutiveRetries := state.ConsecutiveRetries
	if consecutiveRetries == 0 {
		return c.minSleep
	}
	if consecutiveRetries > 9 {
		consecutiveRetries = 9
	}
	// consecutiveRetries starts at 1 so
	// maxSleep is 2**(consecutiveRetries-1) seconds
	maxSleep := time.Second << uint(consecutiveRetries-1)
	// actual sleep is random from 0..maxSleep
	sleepTime := time.Duration(rand.Int63n(int64(maxSleep)))
	if sleepTime < c.minSleep {
		sleepTime = c.minSleep
	}
	return sleepTime
}
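
A sketch of what this randomization produces (not part of the diff; the bounds follow directly from the code above):

	c := NewAmazonCloudDrive(MinSleep(10 * time.Millisecond))
	// After the third consecutive retry the cap is 1s << 2 = 4s,
	// so the sleep is a random duration in [minSleep, 4s).
	d := c.Calculate(State{ConsecutiveRetries: 3})
	_ = d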

// GoogleDrive is a specialized pacer for Google Drive
//
// It implements a truncated exponential backoff strategy with randomization.
// Normally operations are paced at the interval set with SetMinSleep. On errors
// the sleep timer is set to (2 ^ n) + random_number_milliseconds seconds.
//
// See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
type GoogleDrive struct {
	minSleep time.Duration // minimum sleep time
	burst    int           // number of requests without sleeping
	limiter  *rate.Limiter // rate limiter for the minSleep
}

// GoogleDriveOption is the interface implemented by all options for the GoogleDrive Calculator
type GoogleDriveOption interface {
	ApplyGoogleDrive(*GoogleDrive)
}

// NewGoogleDrive returns a new GoogleDrive Calculator with default values
func NewGoogleDrive(opts ...GoogleDriveOption) *GoogleDrive {
	c := &GoogleDrive{
		minSleep: 10 * time.Millisecond,
		burst:    1,
	}
	c.Update(opts...)
	return c
}

// Update applies the Calculator options.
func (c *GoogleDrive) Update(opts ...GoogleDriveOption) {
	for _, opt := range opts {
		opt.ApplyGoogleDrive(c)
	}
	if c.burst <= 0 {
		c.burst = 1
	}
	c.limiter = rate.NewLimiter(rate.Every(c.minSleep), c.burst)
}

// ApplyGoogleDrive updates the value on the Calculator
func (o MinSleep) ApplyGoogleDrive(c *GoogleDrive) {
	c.minSleep = time.Duration(o)
}

// ApplyGoogleDrive updates the value on the Calculator
func (o Burst) ApplyGoogleDrive(c *GoogleDrive) {
	c.burst = int(o)
}

// Calculate takes the current Pacer state and returns the wait time until the next try.
func (c *GoogleDrive) Calculate(state State) time.Duration {
	if t, ok := IsRetryAfter(state.LastError); ok {
		if t < c.minSleep {
			return c.minSleep
		}
		return t
	}

	consecutiveRetries := state.ConsecutiveRetries
	if consecutiveRetries == 0 {
		return c.limiter.Reserve().Delay()
	}
	if consecutiveRetries > 5 {
		consecutiveRetries = 5
	}
	// consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
	// maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
	return time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Second)))
}
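
This is the behaviour the TestGoogleDrivePacer table at the top of this diff pins down: with Burst(10), the limiter hands out ten zero-delay reservations before pacing kicks in. A minimal sketch (not part of the diff):

	c := NewGoogleDrive(MinSleep(2*time.Millisecond), Burst(10))
	for i := 0; i < 12; i++ {
		d := c.Calculate(State{})
		// For i < 10, d == 0 (burst tokens); the 11th and 12th calls
		// get non-zero delays from the 2ms-per-token limiter.
		_ = d
	}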

// S3 implements a pacer compatible with our expectations of S3, where it tries to not
// delay at all between successful calls, but backs off in the default fashion in response
// to any errors.
// The assumption is that errors should be exceedingly rare (S3 seems to have largely solved
// the sort of stability questions rclone is likely to run into), and in the happy case
// it can handle calls with no delays between them.
//
// Basically defaultPacer, but with some handling of sleepTime going to/from 0ms
type S3 struct {
	minSleep       time.Duration // minimum sleep time
	maxSleep       time.Duration // maximum sleep time
	decayConstant  uint          // decay constant
	attackConstant uint          // attack constant
}

// S3Option is the interface implemented by all options for the S3 Calculator
type S3Option interface {
	ApplyS3(*S3)
}

// NewS3 returns a new S3 Calculator with default values
func NewS3(opts ...S3Option) *S3 {
	c := &S3{
		maxSleep:       2 * time.Second,
		decayConstant:  2,
		attackConstant: 1,
	}
	c.Update(opts...)
	return c
}

// Update applies the Calculator options.
func (c *S3) Update(opts ...S3Option) {
	for _, opt := range opts {
		opt.ApplyS3(c)
	}
}

// ApplyS3 updates the value on the Calculator
func (o MaxSleep) ApplyS3(c *S3) {
	c.maxSleep = time.Duration(o)
}

// ApplyS3 updates the value on the Calculator
func (o MinSleep) ApplyS3(c *S3) {
	c.minSleep = time.Duration(o)
}

// ApplyS3 updates the value on the Calculator
func (o DecayConstant) ApplyS3(c *S3) {
	c.decayConstant = uint(o)
}

// ApplyS3 updates the value on the Calculator
func (o AttackConstant) ApplyS3(c *S3) {
	c.attackConstant = uint(o)
}

// Calculate takes the current Pacer state and returns the wait time until the next try.
func (c *S3) Calculate(state State) time.Duration {
	if t, ok := IsRetryAfter(state.LastError); ok {
		if t < c.minSleep {
			return c.minSleep
		}
		return t
	}

	if state.ConsecutiveRetries > 0 {
		if c.attackConstant == 0 {
			return c.maxSleep
		}
		if state.SleepTime == 0 {
			return c.minSleep
		}
		sleepTime := (state.SleepTime << c.attackConstant) / ((1 << c.attackConstant) - 1)
		if sleepTime > c.maxSleep {
			sleepTime = c.maxSleep
		}
		return sleepTime
	}
	sleepTime := (state.SleepTime<<c.decayConstant - state.SleepTime) >> c.decayConstant
	if sleepTime < c.minSleep {
		sleepTime = 0
	}
	return sleepTime
}
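
The zero-sleep handling is the only difference from Default, and the TestS3Pacer table above exercises it. A minimal sketch (not part of the diff):

	c := NewS3(MinSleep(10*time.Millisecond), MaxSleep(time.Second), DecayConstant(2))
	c.Calculate(State{SleepTime: 0})                        // 0: successes are not paced at all
	c.Calculate(State{SleepTime: 0, ConsecutiveRetries: 1}) // 10ms: first failure backs off to minSleep
	c.Calculate(State{SleepTime: 12 * time.Millisecond})    // 0: decay would land below minSleep, so snap to zero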

24
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
@@ -137,7 +137,7 @@ func testOnGCE() bool {
	resc := make(chan bool, 2)

	// Try two strategies in parallel.
	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
	// See https://github.com/googleapis/google-cloud-go/issues/194
	go func() {
		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
		req.Header.Set("User-Agent", userAgent)
@@ -300,8 +300,8 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
		// being stable anyway.
		host = metadataIP
	}
	url := "http://" + host + "/computeMetadata/v1/" + suffix
	req, _ := http.NewRequest("GET", url, nil)
	u := "http://" + host + "/computeMetadata/v1/" + suffix
	req, _ := http.NewRequest("GET", u, nil)
	req.Header.Set("Metadata-Flavor", "Google")
	req.Header.Set("User-Agent", userAgent)
	res, err := c.hc.Do(req)
@@ -312,13 +312,13 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
	if res.StatusCode != 200 {
		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	if res.StatusCode != 200 {
		return "", "", &Error{Code: res.StatusCode, Message: string(all)}
	}
	return string(all), res.Header.Get("Etag"), nil
}

@@ -499,3 +499,15 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro
		}
	}
}

// Error contains an error response from the server.
type Error struct {
	// Code is the HTTP response status code.
	Code int
	// Message is the server response message.
	Message string
}

func (e *Error) Error() string {
	return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
}

9
vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
generated
vendored
@@ -79,8 +79,13 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share
		ipRange: v.IPRange,

		// Container/Blob-specific SAS parameters
		resource:   resource,
		identifier: v.Identifier,
		resource:           resource,
		identifier:         v.Identifier,
		cacheControl:       v.CacheControl,
		contentDisposition: v.ContentDisposition,
		contentEncoding:    v.ContentEncoding,
		contentLanguage:    v.ContentLanguage,
		contentType:        v.ContentType,

		// Calculated SAS signature
		signature: signature,

4
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go
generated
vendored
@@ -229,7 +229,7 @@ func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedI
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
	prefix, include, maxResults := o.pointers()
	return c.client.ListBlobFlatSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
	return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
}

// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
@@ -242,7 +242,7 @@ func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Mark
		return nil, errors.New("snapshots are not supported in this listing operation")
	}
	prefix, include, maxResults := o.pointers()
	return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.val, maxResults, include, nil, nil)
	return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil)
}

// ListBlobsSegmentOptions defines options available when calling ListBlobs.

2
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go
generated
vendored
@@ -80,7 +80,7 @@ func appendToURLPath(u url.URL, name string) url.URL {
// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
	prefix, include, maxResults := o.pointers()
	return s.client.ListContainersSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
	return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
}

// ListContainersOptions defines options available when calling ListContainers.

2
vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
generated
vendored
@@ -1,3 +1,3 @@
package azblob

const serviceLibVersion = "0.3"
const serviceLibVersion = "0.5"

8
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
generated
vendored
@@ -109,8 +109,8 @@ func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
	})
}

// redactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret.
func redactSigQueryParam(rawQuery string) (bool, string) {
// RedactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret.
func RedactSigQueryParam(rawQuery string) (bool, string) {
	rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
	sigFound := strings.Contains(rawQuery, "?sig=")
	if !sigFound {
@@ -131,7 +131,7 @@ func redactSigQueryParam(rawQuery string) (bool, string) {

func prepareRequestForLogging(request pipeline.Request) *http.Request {
	req := request
	if sigFound, rawQuery := redactSigQueryParam(req.URL.RawQuery); sigFound {
	if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound {
		// Make copy so we don't destroy the query parameters we actually need to send in the request
		req = request.Copy()
		req.Request.URL.RawQuery = rawQuery
@@ -161,7 +161,7 @@ func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
		req = request.Copy()
		url, err := url.Parse(req.Header.Get(key))
		if err == nil {
			if sigFound, rawQuery := redactSigQueryParam(url.RawQuery); sigFound {
			if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound {
				url.RawQuery = rawQuery
				req.Header.Set(xMsCopySourceHeader, url.String())
			}

89
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go
generated
vendored
@@ -5,6 +5,8 @@ import (
	"io"
	"net"
	"net/http"
	"strings"
	"sync"
)

const CountToEnd = 0
@@ -28,6 +30,9 @@ type HTTPGetterInfo struct {
	ETag ETag
}

// FailedReadNotifier is a function type that represents the notification function called when a read fails
type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)

// RetryReaderOptions contains properties which can help to decide when to do retry.
type RetryReaderOptions struct {
	// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
@@ -36,6 +41,20 @@ type RetryReaderOptions struct {
	MaxRetryRequests   int
	doInjectError      bool
	doInjectErrorRound int

	// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
	NotifyFailedRead FailedReadNotifier

	// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
	// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
	// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
	// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
	// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
	// treated as a fatal (non-retryable) error.
	// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
	// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
	// which will be retried.
	TreatEarlyCloseAsError bool
}
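
As a usage sketch (not part of the diff): the new hook plugs into the options passed when turning a download response into a reader. This assumes a hypothetical azblob download response `resp` whose Body method takes RetryReaderOptions, plus the standard library `log` package:

	body := resp.Body(azblob.RetryReaderOptions{
		MaxRetryRequests: 3,
		NotifyFailedRead: func(failureCount int, lastError error, offset int64, count int64, willRetry bool) {
			// Diagnostic logging only; the retryReader handles the retry itself.
			log.Printf("read failed (attempt %d) at offset %d: %v (will retry: %v)", failureCount, offset, lastError, willRetry)
		},
	})
	defer body.Close()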

// retryReader implements io.ReaderCloser methods.
@@ -45,17 +64,33 @@ type RetryReaderOptions struct {
// through reading from the new response.
type retryReader struct {
	ctx             context.Context
	response        *http.Response
	info            HTTPGetterInfo
	countWasBounded bool
	o               RetryReaderOptions
	getter          HTTPGetter

	// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
	responseMu *sync.Mutex
	response   *http.Response
}

// NewRetryReader creates a retry reader.
func NewRetryReader(ctx context.Context, initialResponse *http.Response,
	info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
	return &retryReader{ctx: ctx, getter: getter, info: info, countWasBounded: info.Count != CountToEnd, response: initialResponse, o: o}
	return &retryReader{
		ctx:             ctx,
		getter:          getter,
		info:            info,
		countWasBounded: info.Count != CountToEnd,
		response:        initialResponse,
		responseMu:      &sync.Mutex{},
		o:               o}
}

func (s *retryReader) setResponse(r *http.Response) {
	s.responseMu.Lock()
	defer s.responseMu.Unlock()
	s.response = r
}

func (s *retryReader) Read(p []byte) (n int, err error) {
@@ -66,15 +101,19 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
			return 0, io.EOF
		}

		if s.response == nil { // We don't have a response stream to read from, try to get one.
			response, err := s.getter(s.ctx, s.info)
		s.responseMu.Lock()
		resp := s.response
		s.responseMu.Unlock()
		if resp == nil { // We don't have a response stream to read from, try to get one.
			newResponse, err := s.getter(s.ctx, s.info)
			if err != nil {
				return 0, err
			}
			// Successful GET; this is the network stream we'll read from.
			s.response = response
			s.setResponse(newResponse)
			resp = newResponse
		}
		n, err := s.response.Body.Read(p) // Read from the stream
		n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)

		// Injection mechanism for testing.
		if s.o.doInjectError && try == s.o.doInjectErrorRound {
@@ -89,23 +128,49 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
			}
			return n, err // Return the return to the caller
		}
		s.Close()        // Error, close stream
		s.response = nil // Our stream is no longer good
		s.Close()          // Error, close stream
		s.setResponse(nil) // Our stream is no longer good

		// Check the retry count and error code, and decide whether to retry.
		if try >= s.o.MaxRetryRequests {
			return n, err // All retries exhausted
		retriesExhausted := try >= s.o.MaxRetryRequests
		_, isNetError := err.(net.Error)
		willRetry := (isNetError || s.wasRetryableEarlyClose(err)) && !retriesExhausted

		// Notify, for logging purposes, of any failures
		if s.o.NotifyFailedRead != nil {
			failureCount := try + 1 // because try is zero-based
			s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
		}

		if _, ok := err.(net.Error); ok {
		if willRetry {
			continue
			// Loop around and try to get and read from new stream.
		}
		return n, err // Not retryable, just return
		return n, err // Not retryable, or retries exhausted, so just return
	}
}

// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry
// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
// which is exactly the behaviour we want.
// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read)
// then there are two different types of error that may happen - either the one we check for here,
// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
// to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
func (s *retryReader) wasRetryableEarlyClose(err error) bool {
	if s.o.TreatEarlyCloseAsError {
		return false // user wants all early closes to be errors, and so not retryable
	}
	// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
	return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
}

const ReadOnClosedBodyMessage = "read on closed response body"

func (s *retryReader) Close() error {
	s.responseMu.Lock()
	defer s.responseMu.Unlock()
	if s.response != nil && s.response.Body != nil {
		return s.response.Body.Close()
	}

72
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
generated
vendored
@@ -47,17 +47,22 @@ const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 860
// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
type SASQueryParameters struct {
	// All members are immutable or values so copies of this struct are goroutine-safe.
	version       string      `param:"sv"`
	services      string      `param:"ss"`
	resourceTypes string      `param:"srt"`
	protocol      SASProtocol `param:"spr"`
	startTime     time.Time   `param:"st"`
	expiryTime    time.Time   `param:"se"`
	ipRange       IPRange     `param:"sip"`
	identifier    string      `param:"si"`
	resource      string      `param:"sr"`
	permissions   string      `param:"sp"`
	signature     string      `param:"sig"`
	version            string      `param:"sv"`
	services           string      `param:"ss"`
	resourceTypes      string      `param:"srt"`
	protocol           SASProtocol `param:"spr"`
	startTime          time.Time   `param:"st"`
	expiryTime         time.Time   `param:"se"`
	ipRange            IPRange     `param:"sip"`
	identifier         string      `param:"si"`
	resource           string      `param:"sr"`
	permissions        string      `param:"sp"`
	signature          string      `param:"sig"`
	cacheControl       string      `param:"rscc"`
	contentDisposition string      `param:"rscd"`
	contentEncoding    string      `param:"rsce"`
	contentLanguage    string      `param:"rscl"`
	contentType        string      `param:"rsct"`
}

func (p *SASQueryParameters) Version() string {
@@ -99,6 +104,26 @@ func (p *SASQueryParameters) Signature() string {
	return p.signature
}

func (p *SASQueryParameters) CacheControl() string {
	return p.cacheControl
}

func (p *SASQueryParameters) ContentDisposition() string {
	return p.contentDisposition
}

func (p *SASQueryParameters) ContentEncoding() string {
	return p.contentEncoding
}

func (p *SASQueryParameters) ContentLanguage() string {
	return p.contentLanguage
}

func (p *SASQueryParameters) ContentType() string {
	return p.contentType
}

// IPRange represents a SAS IP range's start IP and (optionally) end IP.
type IPRange struct {
	Start net.IP // Not specified if length = 0
@@ -155,6 +180,16 @@ func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool
			p.permissions = val
		case "sig":
			p.signature = val
		case "rscc":
			p.cacheControl = val
		case "rscd":
			p.contentDisposition = val
		case "rsce":
			p.contentEncoding = val
		case "rscl":
			p.contentLanguage = val
		case "rsct":
			p.contentType = val
		default:
			isSASKey = false // We didn't recognize the query parameter
		}
@@ -200,6 +235,21 @@ func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
	if p.signature != "" {
		v.Add("sig", p.signature)
	}
	if p.cacheControl != "" {
		v.Add("rscc", p.cacheControl)
	}
	if p.contentDisposition != "" {
		v.Add("rscd", p.contentDisposition)
	}
	if p.contentEncoding != "" {
		v.Add("rsce", p.contentEncoding)
	}
	if p.contentLanguage != "" {
		v.Add("rscl", p.contentLanguage)
	}
	if p.contentType != "" {
		v.Add("rsct", p.contentType)
	}
	return v
}
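
Taken together with the sas_service.go change above, these additions let a SAS carry response-header overrides end to end. A hedged usage sketch (not part of the diff; the container/blob names are hypothetical, the other field names are assumed to match the azblob API of this vintage, and `credential` is an existing *SharedKeyCredential):

	v := azblob.BlobSASSignatureValues{
		ContainerName:      "mycontainer", // hypothetical
		BlobName:           "myblob",      // hypothetical
		Permissions:        azblob.BlobSASPermissions{Read: true}.String(),
		ExpiryTime:         time.Now().Add(time.Hour),
		CacheControl:       "no-cache",
		ContentDisposition: "attachment",
	}
	qp, err := v.NewSASQueryParameters(credential)
	// On success, the encoded query now carries rscc and rscd alongside sv, se, sp, sig, ...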

3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go
generated
vendored
@@ -6,13 +6,14 @@ package azblob
import (
	"context"
	"encoding/base64"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"time"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// appendBlobClient is the client for the AppendBlob methods of the Azblob service.

3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go
generated
vendored
@@ -8,13 +8,14 @@ import (
	"context"
	"encoding/base64"
	"encoding/xml"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"time"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// blockBlobClient is the client for the BlockBlob methods of the Azblob service.

3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go
generated
vendored
@@ -4,8 +4,9 @@ package azblob
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"github.com/Azure/azure-pipeline-go/pipeline"
	"net/url"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

const (

3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go
generated
vendored
@@ -7,13 +7,14 @@ import (
	"bytes"
	"context"
	"encoding/xml"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"time"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// containerClient is the client for the Container methods of the Azblob service.

6
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go
generated
vendored
@@ -55,7 +55,7 @@ func (md *Metadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {

// Marker represents an opaque value used in paged responses.
type Marker struct {
	val *string
	Val *string
}

// NotDone returns true if the list enumeration should be started or is not yet complete. Specifically, NotDone returns true
@@ -63,14 +63,14 @@ type Marker struct {
// the service. NotDone also returns true whenever the service returns an interim result portion. NotDone returns false only
// after the service has returned the final result portion.
func (m Marker) NotDone() bool {
	return m.val == nil || *m.val != ""
	return m.Val == nil || *m.Val != ""
}

// UnmarshalXML implements the xml.Unmarshaler interface for Marker.
func (m *Marker) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	var out string
	err := d.DecodeElement(&out, &start)
	m.val = &out
	m.Val = &out
	return err
}
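
Exporting Marker.Val lets callers resume a listing from a saved position. The paging loop itself is unchanged; a sketch (not part of the diff; `ctx` and `containerURL` are assumed to exist, and the segment field names are an assumption about this azblob version):

	for marker := (azblob.Marker{}); marker.NotDone(); {
		segment, err := containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
		if err != nil {
			return err
		}
		// ... process segment.Segment.BlobItems ...
		marker = segment.NextMarker
	}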

3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go
generated
vendored
@@ -7,13 +7,14 @@ import (
	"context"
	"encoding/base64"
	"encoding/xml"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"time"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// pageBlobClient is the client for the PageBlob methods of the Azblob service.

@@ -7,8 +7,9 @@ import (
	"bytes"
	"context"
	"encoding/xml"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"io/ioutil"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

type responder func(resp pipeline.Response) (result pipeline.Response, err error)

3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go
generated
vendored
@@ -6,9 +6,10 @@ package azblob
import (
	"bytes"
	"fmt"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"net"
	"net/http"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// if you want to provide custom error handling set this variable to your constructor function

3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go
generated
vendored
@@ -7,12 +7,13 @@ import (
	"bytes"
	"context"
	"encoding/xml"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// serviceClient is the client for the Service methods of the Azblob service.
Some files were not shown because too many files have changed in this diff