Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: fix-bitrix...fix-b2-err (40 commits)
| Author | SHA1 | Date |
|---|---|---|
| | ee87bf19c8 | |
| | cb30a8c80e | |
| | 629a3eeca2 | |
| | f52ae75a51 | |
| | 9d5c5bf7ab | |
| | 53573b4a09 | |
| | 3622e064f5 | |
| | 6d28ea7ab5 | |
| | b9fd02039b | |
| | 1a41c930f3 | |
| | ddb7eb6e0a | |
| | c114695a66 | |
| | fcba51557f | |
| | 9393225a1d | |
| | 3d3ff61f74 | |
| | d98f192425 | |
| | 54771e4402 | |
| | dc286529bc | |
| | 7dc7c021db | |
| | fe1aa13069 | |
| | 5fa8e7d957 | |
| | 9db7c51eaa | |
| | 3859fe2f52 | |
| | 0caf417779 | |
| | 9eab258ffb | |
| | 7df57cd625 | |
| | 1fd9b483c8 | |
| | 93353c431b | |
| | 886dfd23e2 | |
| | 116a8021bb | |
| | 9e2fbe0f1a | |
| | 6d65d116df | |
| | edaeb51ea9 | |
| | 6e2e2d9eb2 | |
| | 20e15e52a9 | |
| | d0f8b4f479 | |
| | 58d82a5c73 | |
| | c0c74003f2 | |
| | 60bc7a079a | |
| | 20c5ca08fb | |
.github/ISSUE_TEMPLATE/Bug.md (vendored): 33 changed lines

@@ -5,19 +5,31 @@ about: Report a problem with rclone
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
We understand you are having a problem with rclone; we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
**STOP and READ**
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
Please show the effort you've put in to solving the problem and please be specific.
People are volunteering their time to help! Low effort posts are not likely to get good answers!
If you think you might have found a bug, try to replicate it with the latest beta (or stable).
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
If you can still replicate it or just got a question then please use the rclone forum:
https://forum.rclone.org/
instead of filing an issue for a quick response.
for a quick response instead of filing an issue on this repo.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
If nothing else helps, then please fill in the info below which helps us help you.
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
**DO NOT REDACT** any information except passwords/keys/personal info.
You should use 3 backticks to begin and end your paste to make it readable.
Make sure to include a log obtained with '-vv'.
You can also use '-vv --log-file bug.log' and a service such as https://pastebin.com or https://gist.github.com/
Thank you

@@ -25,6 +37,11 @@ The Rclone Developers
-->

#### The associated forum post URL from `https://forum.rclone.org`

#### What is the problem you are having with rclone?

@@ -37,7 +54,7 @@ The Rclone Developers

#### Which cloud storage system are you using? (e.g. Google Drive)
#### Which cloud storage system are you using? (e.g. Google Drive)
.github/ISSUE_TEMPLATE/Feature.md (vendored): 16 changed lines

@@ -7,12 +7,16 @@ about: Suggest a new feature or enhancement for rclone
Welcome :-)

So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
So you've got an idea to improve rclone? We love that!
You'll be glad to hear we've incorporated hundreds of ideas from contributors already.

Here is a checklist of things to do:
Probably the latest beta (or stable) release has your feature, so try to update your rclone.
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/

1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
If it still isn't there, here is a checklist of things to do:

1. Search the old issues for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)

@@ -23,6 +27,10 @@ The Rclone Developers
-->

#### The associated forum post URL from `https://forum.rclone.org`

#### What is your current rclone version (output from `rclone version`)?
@@ -33,10 +33,11 @@ page](https://github.com/rclone/rclone).

Now in your terminal

    go get -u github.com/rclone/rclone
    cd $GOPATH/src/github.com/rclone/rclone
    git clone https://github.com/rclone/rclone.git
    cd rclone
    git remote rename origin upstream
    git remote add origin git@github.com:YOURUSER/rclone.git
    go build

Make a branch to add your new feature
MANUAL.html (generated): 1289 changed lines. File diff suppressed because it is too large.

MANUAL.txt (generated): 1867 changed lines. File diff suppressed because it is too large.
@@ -9,6 +9,7 @@ import (
"bytes"
"context"
"crypto/sha1"
"encoding/json"
"fmt"
gohash "hash"
"io"

@@ -344,11 +345,17 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
body, err := rest.ReadBody(resp)
if err != nil {
return errors.Wrap(err, "error reading error out of body")
}
// Decode error response
errResponse := new(api.Error)
err := rest.DecodeJSON(resp, &errResponse)
if err != nil {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
if len(body) > 0 {
err := json.Unmarshal(body, &errResponse)
if err != nil {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
}
if errResponse.Code == "" {
errResponse.Code = "unknown"

@@ -357,7 +364,7 @@ func errorHandler(resp *http.Response) error {
errResponse.Status = resp.StatusCode
}
if errResponse.Message == "" {
errResponse.Message = "Unknown " + resp.Status
errResponse.Message = fmt.Sprintf("Unknown: %s: %s", resp.Status, body)
}
return errResponse
}
@@ -2959,12 +2959,12 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
}

// List all team drives
func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.TeamDrive, err error) {
drives = []*drive.TeamDrive{}
listTeamDrives := f.svc.Teamdrives.List().PageSize(100)
func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err error) {
drives = []*drive.Drive{}
listTeamDrives := f.svc.Drives.List().PageSize(100)
var defaultFs Fs // default Fs with default Options
for {
var teamDrives *drive.TeamDriveList
var teamDrives *drive.DriveList
err = f.pacer.Call(func() (bool, error) {
teamDrives, err = listTeamDrives.Context(ctx).Do()
return defaultFs.shouldRetry(ctx, err)

@@ -2972,7 +2972,7 @@ func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.TeamDrive, err
if err != nil {
return drives, errors.Wrap(err, "listing Team Drives failed")
}
drives = append(drives, teamDrives.TeamDrives...)
drives = append(drives, teamDrives.Drives...)
if teamDrives.NextPageToken == "" {
break
}

@@ -3069,7 +3069,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
return err
}
if destLeaf == "" {
destLeaf = info.Name
destLeaf = path.Base(o.Remote())
}
if destDir == "" {
destDir = "."
@@ -1084,13 +1084,30 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{
Path: absPath,
// FIXME this gives settings_error/not_authorized/.. errors
// and the expires setting isn't in the documentation so remove
// for now.
// Settings: &sharing.SharedLinkSettings{
// Expires: time.Now().Add(time.Duration(expire)).UTC().Round(time.Second),
// },
Settings: &sharing.SharedLinkSettings{
RequestedVisibility: &sharing.RequestedVisibility{
Tagged: dropbox.Tagged{Tag: sharing.RequestedVisibilityPublic},
},
Audience: &sharing.LinkAudience{
Tagged: dropbox.Tagged{Tag: sharing.LinkAudiencePublic},
},
Access: &sharing.RequestedLinkAccessLevel{
Tagged: dropbox.Tagged{Tag: sharing.RequestedLinkAccessLevelViewer},
},
},
}
if expire < fs.DurationOff {
expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
createArg.Settings.Expires = expiryTime
}
// FIXME note we can't set Settings for non enterprise dropbox
// because of https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
// however this only goes wrong when we set Expires, so as a
// work-around remove Settings unless expire is set.
if expire == fs.DurationOff {
createArg.Settings = nil
}

var linkRes sharing.IsSharedLinkMetadata
err = f.pacer.Call(func() (bool, error) {
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
@@ -348,8 +348,10 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}

if len(fileUploadResponse.Links) != 1 {
return nil, errors.New("unexpected amount of files")
if len(fileUploadResponse.Links) == 0 {
return nil, errors.New("upload response not found")
} else if len(fileUploadResponse.Links) > 1 {
fs.Debugf(remote, "Multiple upload responses found, using the first")
}

link := fileUploadResponse.Links[0]
@@ -241,23 +241,6 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}

type dialCtx struct {
f *Fs
ctx context.Context
}

// dial a new connection with fshttp dialer
func (d *dialCtx) dial(network, address string) (net.Conn, error) {
conn, err := fshttp.NewDialer(d.ctx).Dial(network, address)
if err != nil {
return nil, err
}
if d.f.tlsConf != nil {
conn = tls.Client(conn, d.f.tlsConf)
}
return conn, err
}

// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {

@@ -277,9 +260,22 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
dCtx := dialCtx{f, ctx}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
if f.opt.ExplicitTLS {

// Make ftp library dial with fshttp dialer optionally using TLS
dial := func(network, address string) (conn net.Conn, err error) {
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if f.tlsConf != nil && err == nil {
conn = tls.Client(conn, f.tlsConf)
}
return
}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}

if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
// as a trigger for sending PSBZ and PROT options to server.
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
@@ -361,6 +361,11 @@ This will only work if you are copying between two OneDrive *Personal* drives AN
the files to copy are already shared between them. In other cases, rclone will
fall back to normal copy (which will be slightly slower).`,
Advanced: true,
}, {
Name: "list_chunk",
Help: "Size of listing chunk.",
Default: 1000,
Advanced: true,
}, {
Name: "no_versions",
Default: false,

@@ -468,6 +473,7 @@ type Options struct {
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ListChunk int64 `config:"list_chunk"`
NoVersions bool `config:"no_versions"`
LinkScope string `config:"link_scope"`
LinkType string `config:"link_type"`

@@ -896,7 +902,7 @@ type listAllFn func(*api.Item) bool
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := f.newOptsCall(dirID, "GET", "/children?$top=1000")
opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
OUTER:
for {
var result api.ListChildrenResponse

@@ -1423,7 +1429,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
Password: f.opt.LinkPassword,
}

if expire < fs.Duration(time.Hour*24*365*100) {
if expire < fs.DurationOff {
expiry := time.Now().Add(time.Duration(expire))
share.Expiry = &expiry
}
@@ -16,6 +16,7 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/pkg/errors"

@@ -286,6 +287,7 @@ type Fs struct {
drain *time.Timer // used to drain the pool when we stop using the connections
pacer *fs.Pacer // pacer for operations
savedpswd string
transfers int32 // count in use references
}

// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)

@@ -348,6 +350,23 @@ func (c *conn) closed() error {
return nil
}

// Show that we are doing an upload or download
//
// Call removeTransfer() when done
func (f *Fs) addTransfer() {
atomic.AddInt32(&f.transfers, 1)
}

// Show the upload or download done
func (f *Fs) removeTransfer() {
atomic.AddInt32(&f.transfers, -1)
}

// getTransfers shows whether there are any transfers in progress
func (f *Fs) getTransfers() int32 {
return atomic.LoadInt32(&f.transfers)
}

// Open a new connection to the SFTP server.
func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
// Rate limit rate of new connections

@@ -395,8 +414,12 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
opts = opts[:len(opts):len(opts)] // make sure we don't overwrite the callers opts
opts = append(opts,
sftp.UseFstat(f.opt.UseFstat),
sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
// FIXME disabled after library reversion
// sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
)
if f.opt.DisableConcurrentReads { // FIXME
fs.Errorf(f, "Ignoring disable_concurrent_reads after library reversion - see #5197")
}

return sftp.NewClientPipe(pr, pw, opts...)
}

@@ -474,6 +497,13 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
func (f *Fs) drainPool(ctx context.Context) (err error) {
f.poolMu.Lock()
defer f.poolMu.Unlock()
if transfers := f.getTransfers(); transfers != 0 {
fs.Debugf(f, "Not closing %d unused connections as %d transfers in progress", len(f.pool), transfers)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
return nil
}
if f.opt.IdleTimeout > 0 {
f.drain.Stop()
}

@@ -1380,18 +1410,22 @@ func (o *Object) Storable() bool {

// objectReader represents a file open for reading on the SFTP server
type objectReader struct {
f *Fs
sftpFile *sftp.File
pipeReader *io.PipeReader
done chan struct{}
}

func newObjectReader(sftpFile *sftp.File) *objectReader {
func (f *Fs) newObjectReader(sftpFile *sftp.File) *objectReader {
pipeReader, pipeWriter := io.Pipe()
file := &objectReader{
f: f,
sftpFile: sftpFile,
pipeReader: pipeReader,
done: make(chan struct{}),
}
// Show connection in use
f.addTransfer()

go func() {
// Use sftpFile.WriteTo to pump data so that it gets a

@@ -1421,6 +1455,8 @@ func (file *objectReader) Close() (err error) {
_ = file.pipeReader.Close()
// Wait for the background process to finish
<-file.done
// Show connection no longer in use
file.f.removeTransfer()
return err
}

@@ -1454,12 +1490,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.Wrap(err, "Open Seek failed")
}
}
in = readers.NewLimitedReadCloser(newObjectReader(sftpFile), limit)
in = readers.NewLimitedReadCloser(o.fs.newObjectReader(sftpFile), limit)
return in, nil
}

// Update a remote sftp file using the data <in> and ModTime from <src>
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
o.fs.addTransfer() // Show transfer in progress
defer o.fs.removeTransfer()
// Clear the hash cache since we are about to update the object
o.md5sum = nil
o.sha1sum = nil
@@ -125,7 +125,7 @@ func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessResponse) (*CookieRespo
return nil, errors.Wrap(err, "Error while constructing endpoint URL")
}

u, err := url.Parse("https://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
u, err := url.Parse(spRoot.Scheme + "://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
if err != nil {
return nil, errors.Wrap(err, "Error while constructing login URL")
}
@@ -96,6 +96,11 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
}

if fs.GetConfig(ctx).AutoConfirm {
return
}

if err = setupRoot(ctx, name, m); err != nil {
log.Fatalf("Failed to configure root directory: %v", err)
}

@@ -161,7 +166,7 @@ type Object struct {

func setupRegion(m configmap.Mapper) {
region, ok := m.Get("region")
if !ok {
if !ok || region == "" {
log.Fatalf("No region set\n")
}
rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
@@ -44,10 +44,10 @@ var commandDefinition = &cobra.Command{
Use: "about remote:",
Short: `Get quota information from the remote.`,
Long: `
` + "`rclone about`" + `prints quota information about a remote to standard
` + "`rclone about`" + ` prints quota information about a remote to standard
output. The output is typically used, free, quota and trash contents.

E.g. Typical output from` + "`rclone about remote:`" + `is:
E.g. Typical output from ` + "`rclone about remote:`" + ` is:

Total: 17G
Used: 7.444G

@@ -75,7 +75,7 @@ Applying a ` + "`--full`" + ` flag to the command prints the bytes in full, e.g.
Trashed: 104857602
Other: 8849156022

A ` + "`--json`" + `flag generates conveniently computer readable output, e.g.
A ` + "`--json`" + ` flag generates conveniently computer readable output, e.g.

{
"total": 18253611008,
cmd/cmd.go: 13 changed lines

@@ -75,8 +75,19 @@ const (

// ShowVersion prints the version to stdout
func ShowVersion() {
osVersion, osKernel := buildinfo.GetOSVersion()
if osVersion == "" {
osVersion = "unknown"
}
if osKernel == "" {
osKernel = "unknown"
}

linking, tagString := buildinfo.GetLinkingAndTags()

fmt.Printf("rclone %s\n", fs.Version)
fmt.Printf("- os/version: %s\n", osVersion)
fmt.Printf("- os/kernel: %s\n", osKernel)
fmt.Printf("- os/type: %s\n", runtime.GOOS)
fmt.Printf("- os/arch: %s\n", runtime.GOARCH)
fmt.Printf("- go/version: %s\n", runtime.Version())

@@ -553,7 +564,7 @@ func Main() {
setupRootCommand(Root)
AddBackendFlags()
if err := Root.Execute(); err != nil {
if strings.HasPrefix(err.Error(), "unknown command") {
if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled {
Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath())
}
log.Fatalf("Fatal error: %v", err)
@@ -21,6 +21,7 @@ import (
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/buildinfo"
"github.com/rclone/rclone/vfs"
)

@@ -35,6 +36,7 @@ func init() {
cmd.Aliases = append(cmd.Aliases, "cmount")
}
mountlib.AddRc("cmount", mount)
buildinfo.Tags = append(buildinfo.Tags, "cmount")
}

// Find the option string in the current options
@@ -36,7 +36,7 @@ var commandDefinition = &cobra.Command{
Download a URL's content and copy it to the destination without saving
it in temporary storage.

Setting ` + "`--auto-filename`" + `will cause the file name to be retrieved from
Setting ` + "`--auto-filename`" + ` will cause the file name to be retrieved from
the from URL (after any redirections) and used in the destination
path. With ` + "`--print-filename`" + ` in addition, the resuling file name will
be printed.
@@ -3,7 +3,6 @@ package link
import (
"context"
"fmt"
"time"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"

@@ -13,7 +12,7 @@ import (
)

var (
expire = fs.Duration(time.Hour * 24 * 365 * 100)
expire = fs.DurationOff
unlink = false
)
@@ -334,7 +334,7 @@ metadata about files like in UNIX. One case that may arise is that other program
(incorrectly) interprets this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".

WinFsp 2021 (version 1.9, still in beta) introduces a new FUSE option "FileSecurity",
WinFsp 2021 (version 1.9) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"
@@ -1,3 +1,5 @@
// +build !noselfupdate

package selfupdate

// Note: "|" will be replaced by backticks in the help string below

@@ -27,7 +29,7 @@ If the old version contains only dots and digits (for example |v1.54.0|)
then it's a stable release so you won't need the |--beta| flag. Beta releases
have an additional information similar to |v1.54.0-beta.5111.06f1c0c61|.
(if you are a developer and use a locally built rclone, the version number
will end with |-DEV|, you will have to rebuild it as it obvisously can't
will end with |-DEV|, you will have to rebuild it as it obviously can't
be distributed).

If you previously installed rclone via a package manager, the package may
cmd/selfupdate/noselfupdate.go (new file): 11 lines

@@ -0,0 +1,11 @@
// +build noselfupdate

package selfupdate

import (
"github.com/rclone/rclone/lib/buildinfo"
)

func init() {
buildinfo.Tags = append(buildinfo.Tags, "noselfupdate")
}
@@ -1,3 +1,5 @@
// +build !noselfupdate

package selfupdate

import (

@@ -143,14 +145,9 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
return errors.New("--stable and --beta are mutually exclusive")
}

gotCmount := false
for _, tag := range buildinfo.Tags {
if tag == "cmount" {
gotCmount = true
break
}
}
if gotCmount && !cmount.ProvidedBy(runtime.GOOS) {
// The `cmount` tag is added by cmd/cmount/mount.go only if build is static.
_, tags := buildinfo.GetLinkingAndTags()
if strings.Contains(" "+tags+" ", " cmount ") && !cmount.ProvidedBy(runtime.GOOS) {
return errors.New("updating would discard the mount FUSE capability, aborting")
}
@@ -1,3 +1,5 @@
// +build !noselfupdate

package selfupdate

import (

@@ -1,3 +1,5 @@
// +build !noselfupdate

package selfupdate

import (

@@ -1,4 +1,5 @@
// +build !windows,!plan9,!js
// +build !noselfupdate

package selfupdate

@@ -1,4 +1,5 @@
// +build plan9 js
// +build !noselfupdate

package selfupdate

@@ -1,4 +1,5 @@
// +build windows
// +build !noselfupdate

package selfupdate
cmd/selfupdate_disabled.go (new file): 5 lines

@@ -0,0 +1,5 @@
// +build noselfupdate

package cmd

const selfupdateEnabled = false
cmd/selfupdate_enabled.go (new file): 7 lines

@@ -0,0 +1,7 @@
// +build !noselfupdate

package cmd

// This constant must be in the `cmd` package rather than `cmd/selfupdate`
// to prevent build failure due to dependency loop.
const selfupdateEnabled = true
@@ -29,13 +29,16 @@ var commandDefinition = &cobra.Command{
Use: "version",
Short: `Show the version number.`,
Long: `
Show the rclone version number, the go version, the build target OS and
architecture, build tags and the type of executable (static or dynamic).
Show the rclone version number, the go version, the build target
OS and architecture, the runtime OS and kernel version and bitness,
build tags and the type of executable (static or dynamic).

For example:

    $ rclone version
    rclone v1.54
    rclone v1.55.0
    - os/version: ubuntu 18.04 (64 bit)
    - os/kernel: 4.15.0-136-generic (x86_64)
    - os/type: linux
    - os/arch: amd64
    - go/version: go1.16
@@ -477,3 +477,5 @@ put them back in again.` >}}
* Lucas Messenger <lmesseng@cisco.com>
* Manish Kumar <krmanish260@gmail.com>
* x0b <x0bdev@gmail.com>
* CERN through the CS3MESH4EOSC Project
* Nick Gaya <nicholasgaya+github@gmail.com>
@@ -392,6 +392,22 @@ See: the [encoding section in the overview](/overview/#encoding) for more info.
- Type: MultiEncoder
- Default: Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8

#### --azureblob-public-access

Public access level of a container: blob, container.

- Config: public_access
- Env Var: RCLONE_AZUREBLOB_PUBLIC_ACCESS
- Type: string
- Default: ""
- Examples:
    - ""
        - The container and its blobs can be accessed only with an authorized request. It's a default value
    - "blob"
        - Blob data within this container can be read via anonymous request.
    - "container"
        - Allow full public read access for container and blob data.
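An illustrative sketch only: the remote name `azblob:` is hypothetical, and it assumes the access level is applied when rclone creates the container. The option can be supplied as a backend flag or via the environment variable listed above:

    rclone mkdir azblob:publiccontainer --azureblob-public-access blob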
{{< rem autogenerated options stop >}}

### Limitations ###
@@ -5,6 +5,160 @@ description: "Rclone Changelog"

# Changelog

## v1.55.0 - 2021-03-31

[See commits](https://github.com/rclone/rclone/compare/v1.54.0...v1.55.0)

* New commands
    * [selfupdate](/commands/rclone_selfupdate/) (Ivan Andreev)
        * Allows rclone to update itself in-place or via a package (using `--package` flag)
        * Reads cryptographically signed signatures for non beta releases
        * Works on all OSes.
    * [test](/commands/rclone_test/) - these are test commands - use with care!
        * `histogram` - Makes a histogram of file name characters.
        * `info` - Discovers file name or other limitations for paths.
        * `makefiles` - Make a random file hierarchy for testing.
        * `memory` - Load all the objects at remote:path into memory and report memory stats.
* New Features
    * [Connection strings](/docs/#connection-strings)
        * Config parameters can now be passed as part of the remote name as a connection string.
        * For example to do the equivalent of `--drive-shared-with-me` use `drive,shared_with_me:`
        * Make sure we don't save on the fly remote config to the config file (Nick Craig-Wood)
        * Make sure backends with additional config have a different name for caching (Nick Craig-Wood)
        * This work was sponsored by CERN, through the [CS3MESH4EOSC Project](https://cs3mesh4eosc.eu/).
        * CS3MESH4EOSC has received funding from the European Union’s Horizon 2020
        * research and innovation programme under Grant Agreement no. 863353.
    * build
        * Update go build version to go1.16 and raise minimum go version to go1.13 (Nick Craig-Wood)
        * Make a macOS ARM64 build to support Apple Silicon (Nick Craig-Wood)
        * Install macfuse 4.x instead of osxfuse 3.x (Nick Craig-Wood)
        * Use `GO386=softfloat` instead of deprecated `GO386=387` for 386 builds (Nick Craig-Wood)
        * Disable IOS builds for the time being (Nick Craig-Wood)
        * Androids builds made with up to date NDK (x0b)
        * Add an rclone user to the Docker image but don't use it by default (cynthia kwok)
    * dedupe: Make largest directory primary to minimize data moved (Saksham Khanna)
    * config
        * Wrap config library in an interface (Fionera)
        * Make config file system pluggable (Nick Craig-Wood)
        * `--config ""` or `"/notfound"` for in memory config only (Nick Craig-Wood)
        * Clear fs cache of stale entries when altering config (Nick Craig-Wood)
    * copyurl: Add option to print resulting auto-filename (albertony)
    * delete: Make `--rmdirs` obey the filters (Nick Craig-Wood)
    * docs - many fixes and reworks from edwardxml, albertony, pvalls, Ivan Andreev, Evan Harris, buengese, Alexey Tabakman
    * encoder/filename - add SCSU as tables (Klaus Post)
    * Add multiple paths support to `--compare-dest` and `--copy-dest` flag (K265)
    * filter: Make `--exclude "dir/"` equivalent to `--exclude "dir/**"` (Nick Craig-Wood)
    * fshttp: Add DSCP support with `--dscp` for QoS with differentiated services (Max Sum)
    * lib/cache: Add Delete and DeletePrefix methods (Nick Craig-Wood)
    * lib/file
        * Make pre-allocate detect disk full errors and return them (Nick Craig-Wood)
        * Don't run preallocate concurrently (Nick Craig-Wood)
        * Retry preallocate on EINTR (Nick Craig-Wood)
    * operations: Made copy and sync operations obey a RetryAfterError (Ankur Gupta)
    * rc
        * Add string alternatives for setting options over the rc (Nick Craig-Wood)
        * Add `options/local` to see the options configured in the context (Nick Craig-Wood)
        * Add `_config` parameter to set global config for just this rc call (Nick Craig-Wood)
        * Implement passing filter config with `_filter` parameter (Nick Craig-Wood)
        * Add `fscache/clear` and `fscache/entries` to control the fs cache (Nick Craig-Wood)
        * Avoid +Inf value for speed in `core/stats` (albertony)
        * Add a full set of stats to `core/stats` (Nick Craig-Wood)
        * Allow `fs=` params to be a JSON blob (Nick Craig-Wood)
    * rcd: Added systemd notification during the `rclone rcd` command. (Naveen Honest Raj)
    * rmdirs: Make `--rmdirs` obey the filters (Nick Craig-Wood)
    * version: Show build tags and type of executable (Ivan Andreev)
* Bug Fixes
    * install.sh: make it fail on download errors (Ivan Andreev)
    * Fix excessive retries missing `--max-duration` timeout (Nick Craig-Wood)
    * Fix crash when `--low-level-retries=0` (Nick Craig-Wood)
    * Fix failed token refresh on mounts created via the rc (Nick Craig-Wood)
    * fshttp: Fix bandwidth limiting after bad merge (Nick Craig-Wood)
    * lib/atexit
        * Unregister interrupt handler once it has fired so users can interrupt again (Nick Craig-Wood)
        * Fix occasional failure to unmount with CTRL-C (Nick Craig-Wood)
        * Fix deadlock calling Finalise while Run is running (Nick Craig-Wood)
    * lib/rest: Fix multipart uploads not stopping on context cancel (Nick Craig-Wood)
* Mount
    * Allow mounting to root directory on windows (albertony)
    * Improved handling of relative paths on windows (albertony)
    * Fix unicode issues with accented characters on macOS (Nick Craig-Wood)
    * Docs: document the new FileSecurity option in WinFsp 2021 (albertony)
    * Docs: add note about volume path syntax on windows (albertony)
    * Fix caching of old directories after renaming them (Nick Craig-Wood)
    * Update cgofuse to the latest version to bring in macfuse 4 fix (Nick Craig-Wood)
* VFS
    * `--vfs-used-is-size` to report used space using recursive scan (tYYGH)
    * Don't set modification time if it was already correct (Nick Craig-Wood)
    * Fix Create causing windows explorer to truncate files on CTRL-C CTRL-V (Nick Craig-Wood)
    * Fix modtimes not updating when writing via cache (Nick Craig-Wood)
    * Fix modtimes changing by fractional seconds after upload (Nick Craig-Wood)
    * Fix modtime set if `--vfs-cache-mode writes`/`full` and no write (Nick Craig-Wood)
    * Rename files in cache and cancel uploads on directory rename (Nick Craig-Wood)
    * Fix directory renaming by renaming dirs cached in memory (Nick Craig-Wood)
* Local
    * Add flag `--local-no-preallocate` (David Sze)
    * Make `nounc` an advanced option except on Windows (albertony)
    * Don't ignore preallocate disk full errors (Nick Craig-Wood)
* Cache
    * Add `--fs-cache-expire-duration` to control the fs cache (Nick Craig-Wood)
* Crypt
    * Add option to not encrypt data (Vesnyx)
    * Log hash ok on upload (albertony)
* Azure Blob
    * Add container public access level support. (Manish Kumar)
* B2
    * Fix HTML files downloaded via cloudflare (Nick Craig-Wood)
* Box
    * Fix transfers getting stuck on token expiry after API change (Nick Craig-Wood)
* Chunker
    * Partially implement no-rename transactions (Maxwell Calman)
* Drive
    * Don't stop server side copy if couldn't read description (Nick Craig-Wood)
    * Pass context on to drive SDK - to help with cancellation (Nick Craig-Wood)
* Dropbox
    * Add polling for changes support (Robert Thomas)
    * Make `--timeout 0` work properly (Nick Craig-Wood)
    * Raise priority of rate limited message to INFO to make it more noticeable (Nick Craig-Wood)
* Fichier
    * Implement copy & move (buengese)
    * Implement public link (buengese)
* FTP
    * Implement Shutdown method (Nick Craig-Wood)
    * Close idle connections after `--ftp-idle-timeout` (1m by default) (Nick Craig-Wood)
    * Make `--timeout 0` work properly (Nick Craig-Wood)
    * Add `--ftp-close-timeout` flag for use with awkward ftp servers (Nick Craig-Wood)
    * Retry connections and logins on 421 errors (Nick Craig-Wood)
* Hdfs
    * Fix permissions for when directory is created (Lucas Messenger)
* Onedrive
    * Make `--timeout 0` work properly (Nick Craig-Wood)
* S3
    * Fix `--s3-profile` which wasn't working (Nick Craig-Wood)
* SFTP
    * Close idle connections after `--sftp-idle-timeout` (1m by default) (Nick Craig-Wood)
    * Fix "file not found" errors for read once servers (Nick Craig-Wood)
    * Fix SetModTime stat failed: object not found with `--sftp-set-modtime=false` (Nick Craig-Wood)
* Swift
    * Update github.com/ncw/swift to v2.0.0 (Nick Craig-Wood)
    * Implement copying large objects (nguyenhuuluan434)
* Union
    * Fix crash when using epff policy (Nick Craig-Wood)
    * Fix union attempting to update files on a read only file system (Nick Craig-Wood)
    * Refactor to use fspath.SplitFs instead of fs.ParseRemote (Nick Craig-Wood)
    * Fix initialisation broken in refactor (Nick Craig-Wood)
* WebDAV
    * Add support for sharepoint with NTLM authentication (Rauno Ots)
    * Make sharepoint-ntlm docs more consistent (Alex Chen)
    * Improve terminology in sharepoint-ntlm docs (Ivan Andreev)
    * Disable HTTP/2 for NTLM authentication (georne)
    * Fix sharepoint-ntlm error 401 for parallel actions (Ivan Andreev)
    * Check that purged directory really exists (Ivan Andreev)
* Yandex
    * Make `--timeout 0` work properly (Nick Craig-Wood)
* Zoho
    * Replace client id - you will need to `rclone config reconnect` after this (buengese)
    * Add forgotten setupRegion() to NewFs - this finally fixes regions other than EU (buengese)

## v1.54.1 - 2021-03-08

[See commits](https://github.com/rclone/rclone/compare/v1.54.0...v1.54.1)
@@ -416,4 +416,27 @@ Choose how chunker should handle files with missing or invalid chunks.
    - "false"
        - Warn user, skip incomplete file and proceed.

#### --chunker-transactions

Choose how chunker should handle temporary files during transactions.

- Config: transactions
- Env Var: RCLONE_CHUNKER_TRANSACTIONS
- Type: string
- Default: "rename"
- Examples:
    - "rename"
        - Rename temporary files after a successful transaction.
    - "norename"
        - Leave temporary file names and write transaction ID to metadata file.
        - Metadata is required for no rename transactions (meta format cannot be "none").
        - If you are using norename transactions you should be careful not to downgrade Rclone
        - as older versions of Rclone don't support this transaction style and will misinterpret
        - files manipulated by norename transactions.
        - This method is EXPERIMENTAL, don't use on production systems.
    - "auto"
        - Rename or norename will be used depending on capabilities of the backend.
        - If meta format is set to "none", rename transactions will always be used.
        - This method is EXPERIMENTAL, don't use on production systems.
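A hedged usage sketch: the remote name `overlay:` and the paths are hypothetical, and the flag form simply follows the usual backend-option pattern for the Config/Env Var names listed above:

    rclone copy /local/dir overlay:dir --chunker-transactions norename
    # or equivalently via the environment
    RCLONE_CHUNKER_TRANSACTIONS=norename rclone copy /local/dir overlay:dir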
{{< rem autogenerated options stop >}}
@@ -72,11 +72,13 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone rcd](/commands/rclone_rcd/) - Run rclone listening to remote control commands only.
* [rclone rmdir](/commands/rclone_rmdir/) - Remove the empty directory at path.
* [rclone rmdirs](/commands/rclone_rmdirs/) - Remove empty directories under the path.
* [rclone selfupdate](/commands/rclone_selfupdate/) - Update the rclone binary.
* [rclone serve](/commands/rclone_serve/) - Serve a remote over a protocol.
* [rclone settier](/commands/rclone_settier/) - Changes storage class/tier of objects in remote.
* [rclone sha1sum](/commands/rclone_sha1sum/) - Produces an sha1sum file for all the objects in the path.
* [rclone size](/commands/rclone_size/) - Prints the total size and number of objects in remote:path.
* [rclone sync](/commands/rclone_sync/) - Make source and dest identical, modifying destination only.
* [rclone test](/commands/rclone_test/) - Run a test command
* [rclone touch](/commands/rclone_touch/) - Create new file or change file modification time.
* [rclone tree](/commands/rclone_tree/) - List the contents of the remote in a tree like fashion.
* [rclone version](/commands/rclone_version/) - Show the version number.
@@ -15,15 +15,16 @@ Copy url content to dest.
Download a URL's content and copy it to the destination without saving
it in temporary storage.

Setting --auto-filename will cause the file name to be retrieved from
Setting `--auto-filename`will cause the file name to be retrieved from
the from URL (after any redirections) and used in the destination
path.
path. With `--print-filename` in addition, the resuling file name will
be printed.

Setting --no-clobber will prevent overwriting file on the
Setting `--no-clobber` will prevent overwriting file on the
destination if there is one with the same name.

Setting --stdout or making the output file name "-" will cause the
output to be written to standard output.
Setting `--stdout` or making the output file name `-`
will cause the output to be written to standard output.
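For instance, combining the flags described above (the URL and destination path are illustrative):

    rclone copyurl -a -p https://example.com/file.zip remote:backups/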
```
rclone copyurl https://example.com dest:path [flags]
```

@@ -33,10 +34,11 @@ rclone copyurl https://example.com dest:path [flags]

## Options

```
-a, --auto-filename Get the file name from the URL and use it for destination file path
-h, --help help for copyurl
--no-clobber Prevent overwriting file with same name
--stdout Write the output to stdout rather than a file
-a, --auto-filename Get the file name from the URL and use it for destination file path
-h, --help help for copyurl
--no-clobber Prevent overwriting file with same name
-p, --print-filename Print the resulting name from --auto-filename
--stdout Write the output to stdout rather than a file
```

See the [global flags page](/flags/) for global options not listed here.
@@ -17,8 +17,8 @@ By default `dedupe` interactively finds files with duplicate
names and offers to delete all but one or rename them to be
different. This is known as deduping by name.

Deduping by name is only useful with backends like Google Drive which
can have duplicate file names. It can be run on wrapping backends
Deduping by name is only useful with a small group of backends (e.g. Google Drive,
Opendrive) that can have duplicate file names. It can be run on wrapping backends
(e.g. crypt) if they wrap a backend which supports duplicate file
names.
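A minimal sketch (the remote and path are hypothetical, and `rename` is assumed here to be one of the non-interactive modes the command accepts):

    rclone dedupe rename drive:dupes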
@@ -29,15 +29,15 @@ is an **empty** **existing** directory:

On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. The following examples will mount to an automatically assigned drive,
to specific drive letter `X:`, to path `C:\path\to\nonexistent\directory`
(which must be **non-existent** subdirectory of an **existing** parent directory or drive,
to specific drive letter `X:`, to path `C:\path\parent\mount`
(where parent directory or drive must exist, and mount must **not** exist,
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
the last example will mount as network share `\\cloud\remote` and map it to an
automatically assigned drive:

    rclone mount remote:path/to/files *
    rclone mount remote:path/to/files X:
    rclone mount remote:path/to/files C:\path\to\nonexistent\directory
    rclone mount remote:path/to/files C:\path\parent\mount
    rclone mount remote:path/to/files \\cloud\remote

When the program ends while in foreground mode, either via Ctrl+C or receiving
@@ -91,14 +91,14 @@ and experience unexpected program errors, freezes or other issues, consider moun
as a network drive instead.

When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path - which must be **non-existent** subdirectory of an **existing** parent
or to a path representing a **non-existent** subdirectory of an **existing** parent
directory or drive. Using the special value `*` will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving backward.
Examples:

    rclone mount remote:path/to/files *
    rclone mount remote:path/to/files X:
    rclone mount remote:path/to/files C:\path\to\nonexistent\directory
    rclone mount remote:path/to/files C:\path\parent\mount
    rclone mount remote:path/to/files X:

Option `--volname` can be used to set a custom volume name for the mounted
@@ -171,10 +171,24 @@ Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly like you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes" (WinFsp does not support extended attributes,
see [this](https://github.com/billziss-gh/winfsp/wiki/NTFS-Compatibility)).
Windows will then show this as basic permission "Special" instead of "Write",
because "Write" includes the "write extended attributes" permission.
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.

If you set POSIX permissions for only allowing access to the owner, using
`--file-perms 0600 --dir-perms 0700`, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interprets this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".

WinFsp 2021 (version 1.9, still in beta) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying `-o FileSecurity="D:P(A;;FA;;;OW)"`, for file all access (FA) to the owner (OW).

### Windows caveats
@@ -378,6 +392,13 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.
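For example (the remotes, mount points and cache paths are illustrative), two simultaneous mounts can be kept safe by giving each its own cache directory:

    rclone mount remote:a /mnt/a --vfs-cache-mode writes --cache-dir ~/.cache/rclone-a
    rclone mount remote:b /mnt/b --vfs-cache-mode writes --cache-dir ~/.cache/rclone-b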
### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write
@@ -521,6 +542,19 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".

## Alternate report of used bytes

Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.

_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.
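A sketch of typical use (the mount point is illustrative):

    rclone mount remote:path /path/to/mountpoint --vfs-used-is-size --vfs-cache-mode full

Running `df` on the mount point should then report used space computed by scanning the remote, as described above.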
```
rclone mount remote:path /path/to/mountpoint [flags]

@@ -565,6 +599,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
--volname string Set the volume name. Supported on Windows and OSX only.
docs/content/commands/rclone_selfupdate.md (new file): 84 lines

@@ -0,0 +1,84 @@
---
title: "rclone selfupdate"
description: "Update the rclone binary."
slug: rclone_selfupdate
url: /commands/rclone_selfupdate/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/selfupdate/ and as part of making a release run "make commanddocs"
---
# rclone selfupdate

Update the rclone binary.

## Synopsis

This command downloads the latest release of rclone and replaces
the currently running binary. The download is verified with a hashsum
and cryptographically signed signature.

If used without flags (or with implied `--stable` flag), this command
will install the latest stable release. However, some issues may be fixed
(or features added) only in the latest beta release. In such cases you should
run the command with the `--beta` flag, i.e. `rclone selfupdate --beta`.
You can check in advance what version would be installed by adding the
`--check` flag, then repeat the command without it when you are satisfied.

Sometimes the rclone team may recommend you a concrete beta or stable
rclone release to troubleshoot your issue or add a bleeding edge feature.
The `--version VER` flag, if given, will update to the concrete version
instead of the latest one. If you omit micro version from `VER` (for
example `1.53`), the latest matching micro version will be used.

Upon successful update rclone will print a message that contains a previous
version number. You will need it if you later decide to revert your update
for some reason. Then you'll have to note the previous version and run the
following command: `rclone selfupdate [--beta] OLDVER`.
If the old version contains only dots and digits (for example `v1.54.0`)
then it's a stable release so you won't need the `--beta` flag. Beta releases
have an additional information similar to `v1.54.0-beta.5111.06f1c0c61`.
(if you are a developer and use a locally built rclone, the version number
will end with `-DEV`, you will have to rebuild it as it obvisously can't
be distributed).

If you previously installed rclone via a package manager, the package may
include local documentation or configure services. You may wish to update
with the flag `--package deb` or `--package rpm` (whichever is correct for
your OS) to update these too. This command with the default `--package zip`
will update only the rclone executable so the local manual may become
inaccurate after it.

The `rclone mount` command (https://rclone.org/commands/rclone_mount/) may
or may not support extended FUSE options depending on the build and OS.
`selfupdate` will refuse to update if the capability would be discarded.

Note: Windows forbids deletion of a currently running executable so this
command will rename the old executable to 'rclone.old.exe' upon success.

Please note that this command was not available before rclone version 1.55.
If it fails for you with the message `unknown command "selfupdate"` then
you will need to update manually following the install instructions located
at https://rclone.org/install/

```
rclone selfupdate [flags]
```
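For example, to see which beta version an update would install without applying it (both flags are documented under Options below), a minimal sketch is:

```
rclone selfupdate --check --beta
```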
## Options

```
--beta Install beta release.
--check Check for latest release, do not download.
-h, --help help for selfupdate
--output string Save the downloaded binary at a given path (default: replace running binary)
--package string Package format: zip|deb|rpm (default: zip)
--stable Install stable release (this is the default)
--version string Install the given rclone version (default: latest)
```

See the [global flags page](/flags/) for global options not listed here.

## SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
@@ -134,6 +134,13 @@ for two reasons. Firstly because it is only checked every
|
||||
`--vfs-cache-poll-interval`. Secondly because open files cannot be
|
||||
evicted from the cache.
|
||||
|
||||
You **should not** run two copies of rclone using the same VFS cache
|
||||
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
|
||||
This can potentially cause data corruption if you do. You can work
|
||||
around this by giving each rclone its own cache hierarchy with
|
||||
`--cache-dir`. You don't need to worry about this if the remotes in
|
||||
use don't overlap.
|
||||
|
||||
### --vfs-cache-mode off
|
||||
|
||||
In this mode (the default) the cache will read directly from the remote and write
|
||||
@@ -277,6 +284,19 @@ If the flag is not provided on the command line, then its default value depends
|
||||
on the operating system where rclone runs: "true" on Windows and macOS, "false"
|
||||
otherwise. If the flag is provided without a value, then it is "true".
|
||||
|
||||
## Alternate report of used bytes
|
||||
|
||||
Some backends, most notably S3, do not report the amount of bytes used.
|
||||
If you need this information to be available when running `df` on the
|
||||
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
|
||||
With this flag set, instead of relying on the backend to report this
|
||||
information, rclone will scan the whole remote similar to `rclone size`
|
||||
and compute the total used space itself.
|
||||
|
||||
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
|
||||
result is accurate. However, this is very inefficient and may cost lots of API
|
||||
calls resulting in extra charges. Use it as a last resort and only with caching.
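
As a rough illustration (the remote name and paths are assumptions), the flag
is simply added to the serve or mount command whose VFS should report the
scanned usage:

```
# Compute the used space by scanning the remote rather than asking the backend
rclone serve dlna s3remote:media --vfs-used-is-size

# The same flag on a mount makes `df /mnt/media` report the scanned totals
rclone mount s3remote:media /mnt/media --vfs-used-is-size
```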
|
||||
|
||||
|
||||
```
|
||||
rclone serve dlna remote:path [flags]
|
||||
@@ -309,6 +329,7 @@ rclone serve dlna remote:path [flags]
|
||||
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
|
||||
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
|
||||
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
|
||||
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
|
||||
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
|
||||
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
|
||||
```
|
||||
|
||||
@@ -133,6 +133,13 @@ for two reasons. Firstly because it is only checked every
|
||||
`--vfs-cache-poll-interval`. Secondly because open files cannot be
|
||||
evicted from the cache.
|
||||
|
||||
You **should not** run two copies of rclone using the same VFS cache
|
||||
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
|
||||
This can potentially cause data corruption if you do. You can work
|
||||
around this by giving each rclone its own cache hierarchy with
|
||||
`--cache-dir`. You don't need to worry about this if the remotes in
|
||||
use don't overlap.
|
||||
|
||||
### --vfs-cache-mode off
|
||||
|
||||
In this mode (the default) the cache will read directly from the remote and write
|
||||
@@ -276,6 +283,19 @@ If the flag is not provided on the command line, then its default value depends
|
||||
on the operating system where rclone runs: "true" on Windows and macOS, "false"
|
||||
otherwise. If the flag is provided without a value, then it is "true".
|
||||
|
||||
## Alternate report of used bytes
|
||||
|
||||
Some backends, most notably S3, do not report the amount of bytes used.
|
||||
If you need this information to be available when running `df` on the
|
||||
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
|
||||
With this flag set, instead of relying on the backend to report this
|
||||
information, rclone will scan the whole remote similar to `rclone size`
|
||||
and compute the total used space itself.
|
||||
|
||||
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
|
||||
result is accurate. However, this is very inefficient and may cost lots of API
|
||||
calls resulting in extra charges. Use it as a last resort and only with caching.
|
||||
|
||||
## Auth Proxy
|
||||
|
||||
If you supply the parameter `--auth-proxy /path/to/program` then
|
||||
@@ -394,6 +414,7 @@ rclone serve ftp remote:path [flags]
|
||||
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
|
||||
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
|
||||
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
|
||||
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
|
||||
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
|
||||
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
|
||||
```
|
||||
|
||||
@@ -205,6 +205,13 @@ for two reasons. Firstly because it is only checked every
|
||||
`--vfs-cache-poll-interval`. Secondly because open files cannot be
|
||||
evicted from the cache.
|
||||
|
||||
You **should not** run two copies of rclone using the same VFS cache
|
||||
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
|
||||
This can potentially cause data corruption if you do. You can work
|
||||
around this by giving each rclone its own cache hierarchy with
|
||||
`--cache-dir`. You don't need to worry about this if the remotes in
|
||||
use don't overlap.
|
||||
|
||||
### --vfs-cache-mode off
|
||||
|
||||
In this mode (the default) the cache will read directly from the remote and write
|
||||
@@ -348,6 +355,19 @@ If the flag is not provided on the command line, then its default value depends
|
||||
on the operating system where rclone runs: "true" on Windows and macOS, "false"
|
||||
otherwise. If the flag is provided without a value, then it is "true".
|
||||
|
||||
## Alternate report of used bytes
|
||||
|
||||
Some backends, most notably S3, do not report the amount of bytes used.
|
||||
If you need this information to be available when running `df` on the
|
||||
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
|
||||
With this flag set, instead of relying on the backend to report this
|
||||
information, rclone will scan the whole remote similar to `rclone size`
|
||||
and compute the total used space itself.
|
||||
|
||||
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
|
||||
result is accurate. However, this is very inefficient and may cost lots of API
|
||||
calls resulting in extra charges. Use it as a last resort and only with caching.
|
||||
|
||||
|
||||
```
|
||||
rclone serve http remote:path [flags]
|
||||
@@ -390,6 +410,7 @@ rclone serve http remote:path [flags]
|
||||
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
|
||||
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
|
||||
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
|
||||
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
|
||||
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
|
||||
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
|
||||
```
|
||||
|
||||
@@ -144,6 +144,13 @@ for two reasons. Firstly because it is only checked every
|
||||
`--vfs-cache-poll-interval`. Secondly because open files cannot be
|
||||
evicted from the cache.
|
||||
|
||||
You **should not** run two copies of rclone using the same VFS cache
|
||||
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
|
||||
This can potentially cause data corruption if you do. You can work
|
||||
around this by giving each rclone its own cache hierarchy with
|
||||
`--cache-dir`. You don't need to worry about this if the remotes in
|
||||
use don't overlap.
|
||||
|
||||
### --vfs-cache-mode off
|
||||
|
||||
In this mode (the default) the cache will read directly from the remote and write
|
||||
@@ -287,6 +294,19 @@ If the flag is not provided on the command line, then its default value depends
|
||||
on the operating system where rclone runs: "true" on Windows and macOS, "false"
|
||||
otherwise. If the flag is provided without a value, then it is "true".
|
||||
|
||||
## Alternate report of used bytes
|
||||
|
||||
Some backends, most notably S3, do not report the amount of bytes used.
|
||||
If you need this information to be available when running `df` on the
|
||||
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
|
||||
With this flag set, instead of relying on the backend to report this
|
||||
information, rclone will scan the whole remote similar to `rclone size`
|
||||
and compute the total used space itself.
|
||||
|
||||
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
|
||||
result is accurate. However, this is very inefficient and may cost lots of API
|
||||
calls resulting in extra charges. Use it as a last resort and only with caching.
|
||||
|
||||
## Auth Proxy
|
||||
|
||||
If you supply the parameter `--auth-proxy /path/to/program` then
|
||||
@@ -404,6 +424,7 @@ rclone serve sftp remote:path [flags]
|
||||
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
|
||||
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
|
||||
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
|
||||
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
|
||||
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
|
||||
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
|
||||
```
|
||||
|
||||
@@ -213,6 +213,13 @@ for two reasons. Firstly because it is only checked every
|
||||
`--vfs-cache-poll-interval`. Secondly because open files cannot be
|
||||
evicted from the cache.
|
||||
|
||||
You **should not** run two copies of rclone using the same VFS cache
|
||||
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
|
||||
This can potentially cause data corruption if you do. You can work
|
||||
around this by giving each rclone its own cache hierarchy with
|
||||
`--cache-dir`. You don't need to worry about this if the remotes in
|
||||
use don't overlap.
|
||||
|
||||
### --vfs-cache-mode off
|
||||
|
||||
In this mode (the default) the cache will read directly from the remote and write
|
||||
@@ -356,6 +363,19 @@ If the flag is not provided on the command line, then its default value depends
|
||||
on the operating system where rclone runs: "true" on Windows and macOS, "false"
|
||||
otherwise. If the flag is provided without a value, then it is "true".
|
||||
|
||||
## Alternate report of used bytes
|
||||
|
||||
Some backends, most notably S3, do not report the amount of bytes used.
|
||||
If you need this information to be available when running `df` on the
|
||||
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
|
||||
With this flag set, instead of relying on the backend to report this
|
||||
information, rclone will scan the whole remote similar to `rclone size`
|
||||
and compute the total used space itself.
|
||||
|
||||
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
|
||||
result is accurate. However, this is very inefficient and may cost lots of API
|
||||
calls resulting in extra charges. Use it as a last resort and only with caching.
|
||||
|
||||
## Auth Proxy
|
||||
|
||||
If you supply the parameter `--auth-proxy /path/to/program` then
|
||||
@@ -482,6 +502,7 @@ rclone serve webdav remote:path [flags]
|
||||
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
|
||||
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
|
||||
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
|
||||
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
|
||||
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
|
||||
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
|
||||
```
|
||||
|
||||
@@ -15,7 +15,8 @@ Make source and dest identical, modifying destination only.
|
||||
Sync the source to the destination, changing the destination
|
||||
only. Doesn't transfer unchanged files, testing by size and
|
||||
modification time or MD5SUM. Destination is updated to match
|
||||
source, including deleting files if necessary.
|
||||
source, including deleting files if necessary (except duplicate
|
||||
objects, see below).
|
||||
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
`--dry-run` or the `--interactive`/`-i` flag.
|
||||
@@ -23,7 +24,8 @@ source, including deleting files if necessary.
|
||||
rclone sync -i SOURCE remote:DESTINATION
|
||||
|
||||
Note that files in the destination won't be deleted if there were any
|
||||
errors at any point.
|
||||
errors at any point. Duplicate objects (files with the same name, on
|
||||
those providers that support it) are also not yet handled.
|
||||
|
||||
It is always the contents of the directory that is synced, not the
|
||||
directory so when source:path is a directory, it's the contents of
|
||||
@@ -35,6 +37,9 @@ go there.
|
||||
|
||||
**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics
|
||||
|
||||
**Note**: Use the `rclone dedupe` command to deal with "Duplicate object/directory found in source/destination - ignoring" errors.
|
||||
See [this forum post](https://forum.rclone.org/t/sync-not-clearing-duplicates/14372) for more info.
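
As a brief, hedged example (paths and remote name are illustrative), a cautious
first sync might look like:

```
# Preview what would change without modifying the destination
rclone sync --dry-run /home/user/photos remote:photos

# Then run it for real, with live transfer statistics
rclone sync -P /home/user/photos remote:photos
```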
|
||||
|
||||
|
||||
```
|
||||
rclone sync source:path dest:path [flags]
|
||||
|
||||
docs/content/commands/rclone_test.md (new file, 41 lines)
@@ -0,0 +1,41 @@
|
||||
---
|
||||
title: "rclone test"
|
||||
description: "Run a test command"
|
||||
slug: rclone_test
|
||||
url: /commands/rclone_test/
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone test
|
||||
|
||||
Run a test command
|
||||
|
||||
## Synopsis
|
||||
|
||||
Rclone test is used to run test commands.
|
||||
|
||||
Select which test command you want with the subcommand, eg
|
||||
|
||||
rclone test memory remote:
|
||||
|
||||
Each subcommand has its own options which you can see in their help.
|
||||
|
||||
**NB** Be careful running these commands, they may do strange things
|
||||
so reading their documentation first is recommended.
|
||||
|
||||
|
||||
## Options
|
||||
|
||||
```
|
||||
-h, --help help for test
|
||||
```
|
||||
|
||||
See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## SEE ALSO
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
* [rclone test histogram](/commands/rclone_test_histogram/) - Makes a histogram of file name characters.
|
||||
* [rclone test info](/commands/rclone_test_info/) - Discovers file name or other limitations for paths.
|
||||
* [rclone test makefiles](/commands/rclone_test_makefiles/) - Make a random file hierarchy in <dir>
|
||||
* [rclone test memory](/commands/rclone_test_memory/) - Load all the objects at remote:path into memory and report memory stats.
|
||||
|
||||
docs/content/commands/rclone_test_histogram.md (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
---
|
||||
title: "rclone test histogram"
|
||||
description: "Makes a histogram of file name characters."
|
||||
slug: rclone_test_histogram
|
||||
url: /commands/rclone_test_histogram/
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/histogram/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone test histogram
|
||||
|
||||
Makes a histogram of file name characters.
|
||||
|
||||
## Synopsis
|
||||
|
||||
This command outputs JSON which shows the histogram of characters used
|
||||
in filenames in the remote:path specified.
|
||||
|
||||
The data doesn't contain any identifying information but is useful for
|
||||
the rclone developers when developing filename compression.
|
||||
|
||||
|
||||
```
|
||||
rclone test histogram [remote:path] [flags]
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
```
|
||||
-h, --help help for histogram
|
||||
```
|
||||
|
||||
See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## SEE ALSO
|
||||
|
||||
* [rclone test](/commands/rclone_test/) - Run a test command
|
||||
|
||||
docs/content/commands/rclone_test_info.md (new file, 44 lines)
@@ -0,0 +1,44 @@
|
||||
---
|
||||
title: "rclone test info"
|
||||
description: "Discovers file name or other limitations for paths."
|
||||
slug: rclone_test_info
|
||||
url: /commands/rclone_test_info/
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/info/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone test info
|
||||
|
||||
Discovers file name or other limitations for paths.
|
||||
|
||||
## Synopsis
|
||||
|
||||
rclone info discovers what filenames and upload methods are possible
|
||||
to write to the paths passed in and how long they can be. It can take some
|
||||
time. It will write test files into the remote:path passed in. It outputs
|
||||
a bit of go code for each one.
|
||||
|
||||
**NB** this can create undeletable files and other hazards - use with care
|
||||
|
||||
|
||||
```
|
||||
rclone test info [remote:path]+ [flags]
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
```
|
||||
--all Run all tests.
|
||||
--check-control Check control characters.
|
||||
--check-length Check max filename length.
|
||||
--check-normalization Check UTF-8 Normalization.
|
||||
--check-streaming Check uploads with indeterminate file size.
|
||||
-h, --help help for info
|
||||
--upload-wait duration Wait after writing a file.
|
||||
--write-json string Write results to file.
|
||||
```
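
As a hedged example (the scratch path is an assumption - remember this command
writes test files into it):

```
# Probe a scratch area of the remote for filename and upload limitations
rclone test info --all remote:scratch
```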
|
||||
|
||||
See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## SEE ALSO
|
||||
|
||||
* [rclone test](/commands/rclone_test/) - Run a test command
|
||||
|
||||
docs/content/commands/rclone_test_makefiles.md (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
---
|
||||
title: "rclone test makefiles"
|
||||
description: "Make a random file hierarchy in <dir>"
|
||||
slug: rclone_test_makefiles
|
||||
url: /commands/rclone_test_makefiles/
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/makefiles/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone test makefiles
|
||||
|
||||
Make a random file hierarchy in <dir>
|
||||
|
||||
```
|
||||
rclone test makefiles <dir> [flags]
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
```
|
||||
--files int Number of files to create (default 1000)
|
||||
--files-per-directory int Average number of files per directory (default 10)
|
||||
-h, --help help for makefiles
|
||||
--max-file-size SizeSuffix Maximum size of files to create (default 100)
|
||||
--max-name-length int Maximum size of file names (default 12)
|
||||
--min-file-size SizeSuffix Minimum size of file to create
|
||||
--min-name-length int Minimum size of file names (default 4)
|
||||
```
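
A minimal sketch of generating a small test tree (the directory and sizes are
illustrative):

```
# Create 100 random files of up to 10k each under /tmp/rclone-testdata
rclone test makefiles --files 100 --max-file-size 10k /tmp/rclone-testdata
```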
|
||||
|
||||
See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## SEE ALSO
|
||||
|
||||
* [rclone test](/commands/rclone_test/) - Run a test command
|
||||
|
||||
docs/content/commands/rclone_test_memory.md (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
---
|
||||
title: "rclone test memory"
|
||||
description: "Load all the objects at remote:path into memory and report memory stats."
|
||||
slug: rclone_test_memory
|
||||
url: /commands/rclone_test_memory/
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/memory/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone test memory
|
||||
|
||||
Load all the objects at remote:path into memory and report memory stats.
|
||||
|
||||
```
|
||||
rclone test memory remote:path [flags]
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
```
|
||||
-h, --help help for memory
|
||||
```
|
||||
|
||||
See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## SEE ALSO
|
||||
|
||||
* [rclone test](/commands/rclone_test/) - Run a test command
|
||||
|
||||
@@ -12,14 +12,21 @@ Show the version number.
|
||||
## Synopsis
|
||||
|
||||
|
||||
Show the version number, the go version and the architecture.
|
||||
Show the rclone version number, the go version, the build target OS and
|
||||
architecture, build tags and the type of executable (static or dynamic).
|
||||
|
||||
Eg
|
||||
For example:
|
||||
|
||||
$ rclone version
|
||||
rclone v1.41
|
||||
- os/arch: linux/amd64
|
||||
- go version: go1.10
|
||||
rclone v1.54
|
||||
- os/type: linux
|
||||
- os/arch: amd64
|
||||
- go/version: go1.16
|
||||
- go/linking: static
|
||||
- go/tags: none
|
||||
|
||||
Note: before rclone version 1.55 the os/type and os/arch lines were merged,
|
||||
and the "go/version" line was tagged as "go version".
|
||||
|
||||
If you supply the --check flag, then it will do an online check to
|
||||
compare your version with the latest release and the latest beta.
|
||||
|
||||
@@ -517,6 +517,20 @@ names, or for debugging purposes.
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --crypt-no-data-encryption
|
||||
|
||||
Option to either encrypt file data or leave it unencrypted.
|
||||
|
||||
- Config: no_data_encryption
|
||||
- Env Var: RCLONE_CRYPT_NO_DATA_ENCRYPTION
|
||||
- Type: bool
|
||||
- Default: false
|
||||
- Examples:
|
||||
- "true"
|
||||
- Don't encrypt file data, leave it unencrypted.
|
||||
- "false"
|
||||
- Encrypt file data.
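
As a hedged sketch (the remote and path names are assumptions), the documented
environment variable can be used to set this option for a single invocation:

```
# Leave file data unencrypted (names are still encrypted) for this run only
RCLONE_CRYPT_NO_DATA_ENCRYPTION=true rclone copy /local/dir namescrypt:dir
```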
|
||||
|
||||
### Backend commands
|
||||
|
||||
Here are the commands specific to the crypt backend.
|
||||
|
||||
@@ -654,7 +654,7 @@ If you run `rclone config file` you will see where the default
|
||||
location is for you.
|
||||
|
||||
Use this flag to override the config location, e.g. `rclone
|
||||
--config=".myconfig" .config`.
|
||||
--config=".myconfig" config`.
|
||||
|
||||
If the location is set to empty string `""` or the special value
|
||||
`/notfound`, or the os null device represented by value `NUL` on
|
||||
@@ -787,6 +787,27 @@ triggering follow-on actions if data was copied, or skipping if not.
|
||||
NB: Enabling this option turns a usually non-fatal error into a potentially
|
||||
fatal one - please check and adjust your scripts accordingly!
|
||||
|
||||
### --fs-cache-expire-duration=TIME
|
||||
|
||||
When using rclone via the API rclone caches created remotes for 5
|
||||
minutes by default in the "fs cache". This means that if you do
|
||||
repeated actions on the same remote then rclone won't have to build it
|
||||
again from scratch, which makes it more efficient.
|
||||
|
||||
This flag sets the time that the remotes are cached for. If you set it
|
||||
to `0` (or negative) then rclone won't cache the remotes at all.
|
||||
|
||||
Note that if you use some flags, eg `--backup-dir`, and if this is set
to `0`, rclone may build two remotes (one for the source or destination
and one for the `--backup-dir`) where it may have only built one
before.
|
||||
|
||||
### --fs-cache-expire-interval=TIME
|
||||
|
||||
This controls how often rclone checks for cached remotes to expire.
|
||||
See the `--fs-cache-expire-duration` documentation above for more
|
||||
info. The default is 60s, set to 0 to disable expiry.
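
A hedged example of tuning both settings on the remote control daemon (the
values are illustrative):

```
# Cache remotes for 10 minutes and check for expired entries every 2 minutes
rclone rcd --fs-cache-expire-duration 10m --fs-cache-expire-interval 2m
```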
|
||||
|
||||
### --header ###
|
||||
|
||||
Add an HTTP header for all transactions. The flag can be repeated to
|
||||
@@ -2098,7 +2119,7 @@ mys3:
|
||||
Note that if you want to create a remote using environment variables
|
||||
you must create the `..._TYPE` variable as above.
|
||||
|
||||
Note also that now rclone has [connectionstrings](#connection-strings),
|
||||
Note also that now rclone has [connection strings](#connection-strings),
|
||||
it is probably easier to use those instead which makes the above example
|
||||
|
||||
rclone lsd :s3,access_key_id=XXX,secret_access_key=XXX:
|
||||
|
||||
@@ -197,6 +197,21 @@ memory. It can be set smaller if you are tight on memory.
|
||||
|
||||
Impersonate this user when using a business account.
|
||||
|
||||
Note that if you want to use impersonate, you should make sure this
|
||||
flag is set when running "rclone config" as this will cause rclone to
|
||||
request the "members.read" scope which it won't normally. This is
|
||||
needed to lookup a members email address into the internal ID that
|
||||
dropbox uses in the API.
|
||||
|
||||
Using the "members.read" scope will require a Dropbox Team Admin
|
||||
to approve during the OAuth flow.
|
||||
|
||||
You will have to use your own App (setting your own client_id and
|
||||
client_secret) to use this option as currently rclone's default set of
|
||||
permissions doesn't include "members.read". This can be added once
|
||||
v1.55 or later is in use everywhere.
|
||||
|
||||
|
||||
- Config: impersonate
|
||||
- Env Var: RCLONE_DROPBOX_IMPERSONATE
|
||||
- Type: string
|
||||
@@ -270,6 +285,12 @@ dropbox:dir` will return the error `Failed to purge: There are too
|
||||
many files involved in this operation`. As a work-around do an
|
||||
`rclone delete dropbox:dir` followed by an `rclone rmdir dropbox:dir`.
|
||||
|
||||
When using `rclone link` you'll need to set `--expire` if using a
|
||||
non-personal account otherwise the visibility may not be correct.
|
||||
(Note that `--expire` isn't supported on personal accounts). See the
|
||||
[forum discussion](https://forum.rclone.org/t/rclone-link-dropbox-permissions/23211) and the
|
||||
[dropbox SDK issue](https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75).
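
As a hedged example (the remote and file names are assumptions):

```
# Links on business accounts need an explicit expiry
rclone link --expire 24h dropbox-business:shared/report.pdf
```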
|
||||
|
||||
### Get your own Dropbox App ID ###
|
||||
|
||||
When you use rclone with Dropbox in its default configuration you are using rclone's App ID. This is shared between all the rclone users.
|
||||
|
||||
@@ -27,10 +27,10 @@ These flags are available for every command.
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--client-cert string Client SSL certificate (PEM) for mutual TLS auth
|
||||
--client-key string Client SSL private key (PEM) for mutual TLS auth
|
||||
--compare-dest string Include additional server-side path during comparison.
|
||||
--compare-dest stringArray Include additional comma separated server-side paths during comparison.
|
||||
--config string Config file. (default "$HOME/.config/rclone/rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
--copy-dest string Implies --compare-dest but also copies files from path into destination.
|
||||
--copy-dest stringArray Implies --compare-dest but also copies files from paths into destination.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
@@ -39,10 +39,10 @@ These flags are available for every command.
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dscp string Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21.
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--dscp DSCP Name or Value (default 0)
|
||||
--error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file (use - to read from stdin)
|
||||
@@ -53,6 +53,8 @@ These flags are available for every command.
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file (use - to read from stdin)
|
||||
--fs-cache-expire-duration duration cache remotes for this long (0 to disable caching) (default 5m0s)
|
||||
--fs-cache-expire-interval duration interval to check for expired remotes (default 1m0s)
|
||||
--header stringArray Set HTTP header for all transactions
|
||||
--header-download stringArray Set HTTP header for download transactions
|
||||
--header-upload stringArray Set HTTP header for upload transactions
|
||||
@@ -151,7 +153,7 @@ These flags are available for every command.
|
||||
--use-json-log Use json log format.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.54.0")
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.55.0")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
```
|
||||
|
||||
@@ -184,6 +186,7 @@ and may be set in the config file.
|
||||
--azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.
|
||||
--azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.
|
||||
--azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.
|
||||
--azureblob-public-access string Public access level of a container: blob, container.
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-service-principal-file string Path to file containing credentials for use with a service principal.
|
||||
--azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256MB). (Deprecated)
|
||||
@@ -247,6 +250,7 @@ and may be set in the config file.
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted.
|
||||
--crypt-password string Password or pass phrase for encryption. (obscured)
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended. (obscured)
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
@@ -282,7 +286,7 @@ and may be set in the config file.
|
||||
--drive-starred-only Only show files that are starred.
|
||||
--drive-stop-on-download-limit Make download limit errors be fatal
|
||||
--drive-stop-on-upload-limit Make upload limit errors be fatal
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-team-drive string ID of the Shared Drive (Team Drive)
|
||||
--drive-token string OAuth Access Token as a JSON blob.
|
||||
--drive-token-url string Token server url.
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
@@ -311,12 +315,14 @@ and may be set in the config file.
|
||||
--filefabric-token-expiry string Token expiry time
|
||||
--filefabric-url string URL of the Enterprise File Fabric to connect to
|
||||
--filefabric-version string Version read from the file fabric
|
||||
--ftp-close-timeout Duration Maximum time to wait for a response to close. (default 1m0s)
|
||||
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
|
||||
--ftp-disable-epsv Disable using EPSV even if server advertises support
|
||||
--ftp-disable-mlsd Disable using MLSD even if server advertises support
|
||||
--ftp-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,RightSpace,Dot)
|
||||
--ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
|
||||
--ftp-no-check-certificate Do not verify the TLS certificate of the server
|
||||
--ftp-pass string FTP password (obscured)
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
@@ -378,6 +384,7 @@ and may be set in the config file.
|
||||
--local-case-sensitive Force the filesystem to report itself as case sensitive.
|
||||
--local-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Dot)
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-preallocate Disable preallocation of disk space for transferred files
|
||||
--local-no-set-modtime Disable setting modtime
|
||||
--local-no-sparse Disable sparse files for multi-thread downloads
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
@@ -408,6 +415,7 @@ and may be set in the config file.
|
||||
--onedrive-link-password string Set the password for links created by the link command.
|
||||
--onedrive-link-scope string Set the scope of the links created by the link command. (default "anonymous")
|
||||
--onedrive-link-type string Set the type of the links created by the link command. (default "view")
|
||||
--onedrive-list-chunk int Size of listing chunk. (default 1000)
|
||||
--onedrive-no-versions Remove all versions on modifying operations
|
||||
--onedrive-region string Choose national cloud region for OneDrive. (default "global")
|
||||
--onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs.
|
||||
@@ -482,8 +490,10 @@ and may be set in the config file.
|
||||
--seafile-url string URL of seafile host to connect to
|
||||
--seafile-user string User name (usually email address)
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-concurrent-reads If set don't use concurrent reads
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file. (obscured)
|
||||
--sftp-key-pem string Raw PEM-encoded private key, If specified, will override key_file parameter.
|
||||
@@ -553,9 +563,10 @@ and may be set in the config file.
|
||||
--union-upstreams string List of space separated upstreams.
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
|
||||
--webdav-bearer-token-command string Command to run to get a bearer token
|
||||
--webdav-encoding string This sets the encoding for the backend.
|
||||
--webdav-pass string Password. (obscured)
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-user string User name. In case NTLM authentication is used, the username should be in the format 'Domain\User'.
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-auth-url string Auth server URL.
|
||||
--yandex-client-id string OAuth Client Id
|
||||
@@ -563,6 +574,11 @@ and may be set in the config file.
|
||||
--yandex-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,InvalidUtf8,Dot)
|
||||
--yandex-token string OAuth Access Token as a JSON blob.
|
||||
--yandex-token-url string Token server url.
|
||||
--zoho-auth-url string Auth server URL.
|
||||
--zoho-client-id string OAuth Client Id
|
||||
--zoho-client-secret string OAuth Client Secret
|
||||
--zoho-encoding MultiEncoder This sets the encoding for the backend. (default Del,Ctl,InvalidUtf8)
|
||||
--zoho-region string Zoho region to connect to. You'll have to use the region your organization is registered in.
|
||||
--zoho-token string OAuth Access Token as a JSON blob.
|
||||
--zoho-token-url string Token server url.
|
||||
```
|
||||
|
||||
@@ -223,6 +223,30 @@ Disable using MLSD even if server advertises support
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --ftp-idle-timeout
|
||||
|
||||
Max time before closing idle connections
|
||||
|
||||
If no connections have been returned to the connection pool in the time
|
||||
given, rclone will empty the connection pool.
|
||||
|
||||
Set to 0 to keep connections indefinitely.
|
||||
|
||||
|
||||
- Config: idle_timeout
|
||||
- Env Var: RCLONE_FTP_IDLE_TIMEOUT
|
||||
- Type: Duration
|
||||
- Default: 1m0s
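
A brief, hedged example (remote name and value are illustrative):

```
# Drop idle FTP connections after 30 seconds instead of the default 1 minute
rclone lsd --ftp-idle-timeout 30s ftpremote:
```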
|
||||
|
||||
#### --ftp-close-timeout
|
||||
|
||||
Maximum time to wait for a response to close.
|
||||
|
||||
- Config: close_timeout
|
||||
- Env Var: RCLONE_FTP_CLOSE_TIMEOUT
|
||||
- Type: Duration
|
||||
- Default: 1m0s
|
||||
|
||||
#### --ftp-encoding
|
||||
|
||||
This sets the encoding for the backend.
|
||||
|
||||
@@ -101,12 +101,12 @@ case "$OS_type" in
|
||||
i?86|x86)
|
||||
OS_type='386'
|
||||
;;
|
||||
aarch64|arm64)
|
||||
OS_type='arm64'
|
||||
;;
|
||||
arm*)
|
||||
OS_type='arm'
|
||||
;;
|
||||
aarch64)
|
||||
OS_type='arm64'
|
||||
;;
|
||||
*)
|
||||
echo 'OS type not supported'
|
||||
exit 2
|
||||
|
||||
@@ -320,9 +320,9 @@ filesystem.
|
||||
where it isn't supported (e.g. Windows) it will be ignored.
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/local/local.go then run make backenddocs" >}}
|
||||
### Standard Options
|
||||
### Advanced Options
|
||||
|
||||
Here are the standard options specific to local (Local Disk).
|
||||
Here are the advanced options specific to local (Local Disk).
|
||||
|
||||
#### --local-nounc
|
||||
|
||||
@@ -336,10 +336,6 @@ Disable UNC (long path names) conversion on Windows
|
||||
- "true"
|
||||
- Disables long file names
|
||||
|
||||
### Advanced Options
|
||||
|
||||
Here are the advanced options specific to local (Local Disk).
|
||||
|
||||
#### --copy-links / -L
|
||||
|
||||
Follow symlinks and copy the pointed to item.
|
||||
|
||||
@@ -325,6 +325,15 @@ fall back to normal copy (which will be slightly slower).
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --onedrive-list-chunk
|
||||
|
||||
Size of listing chunk.
|
||||
|
||||
- Config: list_chunk
|
||||
- Env Var: RCLONE_ONEDRIVE_LIST_CHUNK
|
||||
- Type: int
|
||||
- Default: 1000
|
||||
|
||||
#### --onedrive-no-versions
|
||||
|
||||
Remove all versions on modifying operations
|
||||
|
||||
@@ -378,6 +378,55 @@ call and taken by the [options/set](#options-set) calls as well as the
|
||||
- `BandwidthSpec` - this will be set and returned as a string, eg
|
||||
"1M".
|
||||
|
||||
## Specifying remotes to work on
|
||||
|
||||
Remotes are specified with the `fs=`, `srcFs=`, `dstFs=`
|
||||
parameters depending on the command being used.
|
||||
|
||||
The parameters can be a string as per the rest of rclone, eg
|
||||
`s3:bucket/path` or `:sftp:/my/dir`. They can also be specified as
|
||||
JSON blobs.
|
||||
|
||||
If specifying a JSON blob it should be an object mapping strings to
|
||||
strings. These values will be used to configure the remote. There are
|
||||
3 special values which may be set:
|
||||
|
||||
- `type` - set to `type` to specify a remote called `:type:`
|
||||
- `_name` - set to `name` to specify a remote called `name:`
|
||||
- `_root` - sets the root of the remote - may be empty
|
||||
|
||||
One of `_name` or `type` should normally be set. If the `local`
|
||||
backend is desired then `type` should be set to `local`. If `_root`
|
||||
isn't specified then it defaults to the root of the remote.
|
||||
|
||||
For example this JSON is equivalent to `remote:/tmp`
|
||||
|
||||
```
|
||||
{
|
||||
"_name": "remote",
|
||||
"_path": "/tmp"
|
||||
}
|
||||
```
|
||||
|
||||
And this is equivalent to `:sftp,host='example.com':/tmp`
|
||||
|
||||
```
|
||||
{
|
||||
"type": "sftp",
|
||||
"host": "example.com",
|
||||
"_path": "/tmp"
|
||||
}
|
||||
```
|
||||
|
||||
And this is equivalent to `/tmp/dir`
|
||||
|
||||
```
|
||||
{
|
||||
type = "local",
|
||||
_ path = "/tmp/dir"
|
||||
}
|
||||
```
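
As a hedged illustration (the rc daemon, bucket and path are assumptions), such
a blob can be passed to a running `rclone rcd` using the `rc` command's JSON
input:

```
# List an on-the-fly S3 remote without creating a named config entry
rclone rc --json '{"fs": {"type": "s3", "provider": "AWS", "_root": "bucket/path"}, "remote": ""}' operations/list
```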
|
||||
|
||||
## Supported commands
|
||||
{{< rem autogenerated start "- run make rcdocs - don't edit here" >}}
|
||||
### backend/command: Runs a backend command. {#backend-command}
|
||||
@@ -716,18 +765,22 @@ Returns the following values:
|
||||
|
||||
```
|
||||
{
|
||||
"speed": average speed in bytes/sec since start of the process,
|
||||
"bytes": total transferred bytes since the start of the process,
|
||||
"bytes": total transferred bytes since the start of the group,
|
||||
"checks": number of files checked,
|
||||
"deletes" : number of files deleted,
|
||||
"elapsedTime": time in floating point seconds since rclone was started,
|
||||
"errors": number of errors,
|
||||
"fatalError": whether there has been at least one FatalError,
|
||||
"retryError": whether there has been at least one non-NoRetryError,
|
||||
"checks": number of checked files,
|
||||
"transfers": number of transferred files,
|
||||
"deletes" : number of deleted files,
|
||||
"renames" : number of renamed files,
|
||||
"eta": estimated time in seconds until the group completes,
|
||||
"fatalError": boolean whether there has been at least one fatal error,
|
||||
"lastError": last error string,
|
||||
"renames" : number of files renamed,
|
||||
"retryError": boolean showing whether there has been at least one non-NoRetryError,
|
||||
"speed": average speed in bytes/sec since start of the group,
|
||||
"totalBytes": total number of bytes in the group,
|
||||
"totalChecks": total number of checks in the group,
|
||||
"totalTransfers": total number of transfers in the group,
|
||||
"transferTime" : total time spent on running jobs,
|
||||
"elapsedTime": time in seconds since the start of the process,
|
||||
"lastError": last occurred error,
|
||||
"transfers": number of transferred files,
|
||||
"transferring": an array of currently active file transfers:
|
||||
[
|
||||
{
|
||||
@@ -808,6 +861,8 @@ This shows the current version of go and the go runtime
|
||||
- os - OS in use as according to Go
|
||||
- arch - cpu architecture in use according to Go
|
||||
- goVersion - version of Go runtime in use
|
||||
- linking - type of rclone executable (static or dynamic)
|
||||
- goTags - space separated build tags or "none"
|
||||
|
||||
### debug/set-block-profile-rate: Set runtime.SetBlockProfileRate for blocking profiling. {#debug-set-block-profile-rate}
|
||||
|
||||
@@ -847,6 +902,26 @@ Results
|
||||
|
||||
- previousRate - int
|
||||
|
||||
### fscache/clear: Clear the Fs cache. {#fscache-clear}
|
||||
|
||||
This clears the fs cache. This is where remotes created from backends
|
||||
are cached for a short while to make repeated rc calls more efficient.
|
||||
|
||||
If you change the parameters of a backend then you may want to call
|
||||
this to clear an existing remote out of the cache before re-creating
|
||||
it.
|
||||
|
||||
**Authentication is required for this call.**
|
||||
|
||||
### fscache/entries: Returns the number of entries in the fs cache. {#fscache-entries}
|
||||
|
||||
This returns the number of entries in the fs cache.
|
||||
|
||||
Returns
|
||||
- entries - number of items in the cache
|
||||
|
||||
**Authentication is required for this call.**
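
A brief sketch of calling these from the command line (assumes a running
`rclone rcd` and whatever credentials it was started with):

```
# How many remotes are currently cached?
rclone rc fscache/entries

# Drop them all, e.g. after changing a backend's parameters
rclone rc fscache/clear
```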
|
||||
|
||||
### job/list: Lists the IDs of the running jobs {#job-list}
|
||||
|
||||
Parameters - None
|
||||
@@ -1207,6 +1282,7 @@ This takes the following parameters
|
||||
- fs - a remote name string e.g. "drive:"
|
||||
- remote - a path within that remote e.g. "dir"
|
||||
- each part in body represents a file to be uploaded
|
||||
See the [uploadfile command](/commands/rclone_uploadfile/) command for more information on the above.
|
||||
|
||||
**Authentication is required for this call.**
|
||||
|
||||
@@ -1215,11 +1291,31 @@ This takes the following parameters
|
||||
Returns
|
||||
- options - a list of the options block names
|
||||
|
||||
### options/get: Get all the options {#options-get}
|
||||
### options/get: Get all the global options {#options-get}
|
||||
|
||||
Returns an object where keys are option block names and values are an
|
||||
object with the current option values in.
|
||||
|
||||
Note that these are the global options which are unaffected by use of
|
||||
the _config and _filter parameters. If you wish to read the parameters
|
||||
set in _config then use options/config and for _filter use options/filter.
|
||||
|
||||
This shows the internal names of the option within rclone which should
|
||||
map to the external options very easily with a few exceptions.
|
||||
|
||||
### options/local: Get the currently active config for this call {#options-local}
|
||||
|
||||
Returns an object with the keys "config" and "filter".
|
||||
The "config" key contains the local config and the "filter" key contains
|
||||
the local filters.
|
||||
|
||||
Note that these are the local options specific to this rc call. If
|
||||
_config was not supplied then they will be the global options.
|
||||
Likewise with "_filter".
|
||||
|
||||
This call is mostly useful for seeing if _config and _filter passing
|
||||
is working.
|
||||
|
||||
This shows the internal names of the option within rclone which should
|
||||
map to the external options very easily with a few exceptions.
|
||||
|
||||
@@ -1372,6 +1468,7 @@ This takes the following parameters
|
||||
|
||||
- srcFs - a remote name string e.g. "drive:src" for the source
|
||||
- dstFs - a remote name string e.g. "drive:dst" for the destination
|
||||
- createEmptySrcDirs - create empty src directories on destination if set
|
||||
|
||||
|
||||
See the [copy command](/commands/rclone_copy/) command for more information on the above.
|
||||
@@ -1384,6 +1481,7 @@ This takes the following parameters
|
||||
|
||||
- srcFs - a remote name string e.g. "drive:src" for the source
|
||||
- dstFs - a remote name string e.g. "drive:dst" for the destination
|
||||
- createEmptySrcDirs - create empty src directories on destination if set
|
||||
- deleteEmptySrcDirs - delete empty src directories if set
|
||||
|
||||
|
||||
@@ -1397,6 +1495,7 @@ This takes the following parameters
|
||||
|
||||
- srcFs - a remote name string e.g. "drive:src" for the source
|
||||
- dstFs - a remote name string e.g. "drive:dst" for the destination
|
||||
- createEmptySrcDirs - create empty src directories on destination if set
|
||||
|
||||
|
||||
See the [sync command](/commands/rclone_sync/) command for more information on the above.
|
||||
|
||||
@@ -496,6 +496,44 @@ any given time.
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --sftp-disable-concurrent-reads
|
||||
|
||||
If set don't use concurrent reads
|
||||
|
||||
Normally concurrent reads are safe to use and not using them will
|
||||
degrade performance, so this option is disabled by default.
|
||||
|
||||
Some servers limit the number of times a file can be
|
||||
downloaded. Using concurrent reads can trigger this limit, so if you
|
||||
have a server which returns
|
||||
|
||||
Failed to copy: file does not exist
|
||||
|
||||
Then you may need to enable this flag.
|
||||
|
||||
If concurrent reads are disabled, the use_fstat option is ignored.
|
||||
|
||||
|
||||
- Config: disable_concurrent_reads
|
||||
- Env Var: RCLONE_SFTP_DISABLE_CONCURRENT_READS
|
||||
- Type: bool
|
||||
- Default: false
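
A hedged example (the remote and file are assumptions):

```
# Work around per-file download limits on servers that enforce them
rclone copy --sftp-disable-concurrent-reads sftpremote:backups/file.bin /tmp/
```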
|
||||
|
||||
#### --sftp-idle-timeout
|
||||
|
||||
Max time before closing idle connections
|
||||
|
||||
If no connections have been returned to the connection pool in the time
|
||||
given, rclone will empty the connection pool.
|
||||
|
||||
Set to 0 to keep connections indefinitely.
|
||||
|
||||
|
||||
- Config: idle_timeout
|
||||
- Env Var: RCLONE_SFTP_IDLE_TIMEOUT
|
||||
- Type: Duration
|
||||
- Default: 1m0s
|
||||
|
||||
{{< rem autogenerated options stop >}}
|
||||
|
||||
### Limitations ###
|
||||
|
||||
@@ -137,23 +137,21 @@ Name of the Webdav site/service/software you are using
|
||||
- "owncloud"
|
||||
- Owncloud
|
||||
- "sharepoint"
|
||||
- Sharepoint
|
||||
- Sharepoint Online, authenticated by Microsoft account.
|
||||
- "sharepoint-ntlm"
|
||||
- Sharepoint with NTLM authentication
|
||||
- Sharepoint with NTLM authentication. Usually self-hosted or on-premises.
|
||||
- "other"
|
||||
- Other site/service or software
|
||||
|
||||
#### --webdav-user
|
||||
|
||||
User name
|
||||
User name. In case NTLM authentication is used, the username should be in the format 'Domain\User'.
|
||||
|
||||
- Config: user
|
||||
- Env Var: RCLONE_WEBDAV_USER
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
In case vendor mode `sharepoint-ntlm` is used, the user name is in the form `DOMAIN\user`
|
||||
|
||||
#### --webdav-pass
|
||||
|
||||
Password.
|
||||
@@ -187,6 +185,19 @@ Command to run to get a bearer token
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --webdav-encoding
|
||||
|
||||
This sets the encoding for the backend.
|
||||
|
||||
See: the [encoding section in the overview](/overview/#encoding) for more info.
|
||||
|
||||
Default encoding is Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8 for sharepoint-ntlm or identity otherwise.
|
||||
|
||||
- Config: encoding
|
||||
- Env Var: RCLONE_WEBDAV_ENCODING
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
{{< rem autogenerated options stop >}}
|
||||
|
||||
## Provider notes ##
|
||||
|
||||
@@ -128,6 +128,26 @@ from filenames during upload.
|
||||
|
||||
Here are the standard options specific to zoho (Zoho).
|
||||
|
||||
#### --zoho-client-id
|
||||
|
||||
OAuth Client Id
|
||||
Leave blank normally.
|
||||
|
||||
- Config: client_id
|
||||
- Env Var: RCLONE_ZOHO_CLIENT_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-client-secret
|
||||
|
||||
OAuth Client Secret
|
||||
Leave blank normally.
|
||||
|
||||
- Config: client_secret
|
||||
- Env Var: RCLONE_ZOHO_CLIENT_SECRET
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-region
|
||||
|
||||
Zoho region to connect to. You'll have to use the region your organization is registered in.
|
||||
@@ -150,6 +170,35 @@ Zoho region to connect to. You'll have to use the region you organization is reg
|
||||
|
||||
Here are the advanced options specific to zoho (Zoho).
|
||||
|
||||
#### --zoho-token
|
||||
|
||||
OAuth Access Token as a JSON blob.
|
||||
|
||||
- Config: token
|
||||
- Env Var: RCLONE_ZOHO_TOKEN
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-auth-url
|
||||
|
||||
Auth server URL.
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
- Config: auth_url
|
||||
- Env Var: RCLONE_ZOHO_AUTH_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-token-url
|
||||
|
||||
Token server url.
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
- Config: token_url
|
||||
- Env Var: RCLONE_ZOHO_TOKEN_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-encoding
|
||||
|
||||
This sets the encoding for the backend.
|
||||
|
||||
@@ -1 +1 @@
|
||||
v1.55.0
|
||||
v1.56.0
|
||||
fs/cache/cache.go (vendored, 37 changes)
@@ -12,14 +12,26 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
c = cache.New()
|
||||
once sync.Once // creation
|
||||
c *cache.Cache
|
||||
mu sync.Mutex // mutex to protect remap
|
||||
remap = map[string]string{} // map user supplied names to canonical names
|
||||
)
|
||||
|
||||
// Create the cache just once
|
||||
func createOnFirstUse() {
|
||||
once.Do(func() {
|
||||
ci := fs.GetConfig(context.Background())
|
||||
c = cache.New()
|
||||
c.SetExpireDuration(ci.FsCacheExpireDuration)
|
||||
c.SetExpireInterval(ci.FsCacheExpireInterval)
|
||||
})
|
||||
}
|
||||
|
||||
// Canonicalize looks up fsString in the mapping from user supplied
|
||||
// names to canonical names and return the canonical form
|
||||
func Canonicalize(fsString string) string {
|
||||
createOnFirstUse()
|
||||
mu.Lock()
|
||||
canonicalName, ok := remap[fsString]
|
||||
mu.Unlock()
|
||||
@@ -43,10 +55,11 @@ func addMapping(fsString, canonicalName string) {
|
||||
// GetFn gets an fs.Fs named fsString either from the cache or creates
|
||||
// it afresh with the create function
|
||||
func GetFn(ctx context.Context, fsString string, create func(ctx context.Context, fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
|
||||
fsString = Canonicalize(fsString)
|
||||
createOnFirstUse()
|
||||
canonicalFsString := Canonicalize(fsString)
|
||||
created := false
|
||||
value, err := c.Get(fsString, func(fsString string) (f interface{}, ok bool, err error) {
|
||||
f, err = create(ctx, fsString)
|
||||
value, err := c.Get(canonicalFsString, func(canonicalFsString string) (f interface{}, ok bool, err error) {
|
||||
f, err = create(ctx, fsString) // always create the backend with the original non-canonicalised string
|
||||
ok = err == nil || err == fs.ErrorIsFile
|
||||
created = ok
|
||||
return f, ok, err
|
||||
@@ -58,19 +71,19 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context
|
||||
// Check we stored the Fs at the canonical name
|
||||
if created {
|
||||
canonicalName := fs.ConfigString(f)
|
||||
if canonicalName != fsString {
|
||||
if canonicalName != canonicalFsString {
|
||||
// Note that if err == fs.ErrorIsFile at this moment
|
||||
// then we can't rename the remote as it will have the
|
||||
// wrong error status, we need to add a new one.
|
||||
if err == nil {
|
||||
fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", fsString, canonicalName)
|
||||
value, found := c.Rename(fsString, canonicalName)
|
||||
fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", canonicalFsString, canonicalName)
|
||||
value, found := c.Rename(canonicalFsString, canonicalName)
|
||||
if found {
|
||||
f = value.(fs.Fs)
|
||||
}
|
||||
addMapping(fsString, canonicalName)
|
||||
addMapping(canonicalFsString, canonicalName)
|
||||
} else {
|
||||
fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", fsString, canonicalName)
|
||||
fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", canonicalFsString, canonicalName)
|
||||
Put(canonicalName, f)
|
||||
}
|
||||
}
|
||||
@@ -80,6 +93,7 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context
|
||||
|
||||
// Pin f into the cache until Unpin is called
|
||||
func Pin(f fs.Fs) {
|
||||
createOnFirstUse()
|
||||
c.Pin(fs.ConfigString(f))
|
||||
}
|
||||
|
||||
@@ -97,6 +111,7 @@ func PinUntilFinalized(f fs.Fs, x interface{}) {
|
||||
|
||||
// Unpin f from the cache
|
||||
func Unpin(f fs.Fs) {
|
||||
createOnFirstUse()
|
||||
c.Pin(fs.ConfigString(f))
|
||||
}
|
||||
|
||||
@@ -127,6 +142,7 @@ func GetArr(ctx context.Context, fsStrings []string) (f []fs.Fs, err error) {
|
||||
|
||||
// Put puts an fs.Fs named fsString into the cache
|
||||
func Put(fsString string, f fs.Fs) {
|
||||
createOnFirstUse()
|
||||
canonicalName := fs.ConfigString(f)
|
||||
c.Put(canonicalName, f)
|
||||
addMapping(fsString, canonicalName)
|
||||
@@ -136,15 +152,18 @@ func Put(fsString string, f fs.Fs) {
|
||||
//
|
||||
// Returns number of entries deleted
|
||||
func ClearConfig(name string) (deleted int) {
|
||||
createOnFirstUse()
|
||||
return c.DeletePrefix(name + ":")
|
||||
}
|
||||
|
||||
// Clear removes everything from the cache
|
||||
func Clear() {
|
||||
createOnFirstUse()
|
||||
c.Clear()
|
||||
}
|
||||
|
||||
// Entries returns the number of entries in the cache
|
||||
func Entries() int {
|
||||
createOnFirstUse()
|
||||
return c.Entries()
|
||||
}
|
||||
|
||||
fs/cache/cache_test.go (vendored, 32 changes)
@@ -33,7 +33,7 @@ func mockNewFs(t *testing.T) (func(), func(ctx context.Context, path string) (fs
|
||||
panic("unreachable")
|
||||
}
|
||||
cleanup := func() {
|
||||
c.Clear()
|
||||
Clear()
|
||||
}
|
||||
return cleanup, create
|
||||
}
|
||||
@@ -42,12 +42,12 @@ func TestGet(t *testing.T) {
|
||||
cleanup, create := mockNewFs(t)
|
||||
defer cleanup()
|
||||
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
assert.Equal(t, 0, Entries())
|
||||
|
||||
f, err := GetFn(context.Background(), "mock:/", create)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 1, c.Entries())
|
||||
assert.Equal(t, 1, Entries())
|
||||
|
||||
f2, err := GetFn(context.Background(), "mock:/", create)
|
||||
require.NoError(t, err)
|
||||
@@ -59,13 +59,13 @@ func TestGetFile(t *testing.T) {
|
||||
cleanup, create := mockNewFs(t)
|
||||
defer cleanup()
|
||||
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
assert.Equal(t, 0, Entries())
|
||||
|
||||
f, err := GetFn(context.Background(), "mock:/file.txt", create)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
require.NotNil(t, f)
|
||||
|
||||
assert.Equal(t, 2, c.Entries())
|
||||
assert.Equal(t, 2, Entries())
|
||||
|
||||
f2, err := GetFn(context.Background(), "mock:/file.txt", create)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
@@ -85,13 +85,13 @@ func TestGetFile2(t *testing.T) {
|
||||
cleanup, create := mockNewFs(t)
|
||||
defer cleanup()
|
||||
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
assert.Equal(t, 0, Entries())
|
||||
|
||||
f, err := GetFn(context.Background(), "mock:file.txt", create)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
require.NotNil(t, f)
|
||||
|
||||
assert.Equal(t, 2, c.Entries())
|
||||
assert.Equal(t, 2, Entries())
|
||||
|
||||
f2, err := GetFn(context.Background(), "mock:file.txt", create)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
@@ -111,13 +111,13 @@ func TestGetError(t *testing.T) {
|
||||
cleanup, create := mockNewFs(t)
|
||||
defer cleanup()
|
||||
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
assert.Equal(t, 0, Entries())
|
||||
|
||||
f, err := GetFn(context.Background(), "mock:/error", create)
|
||||
require.Equal(t, errSentinel, err)
|
||||
require.Equal(t, nil, f)
|
||||
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
assert.Equal(t, 0, Entries())
|
||||
}
|
||||
|
||||
func TestPut(t *testing.T) {
|
||||
@@ -126,17 +126,17 @@ func TestPut(t *testing.T) {
|
||||
|
||||
f := mockfs.NewFs(context.Background(), "mock", "/alien")
|
||||
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
assert.Equal(t, 0, Entries())
|
||||
|
||||
Put("mock:/alien", f)
|
||||
|
||||
assert.Equal(t, 1, c.Entries())
|
||||
assert.Equal(t, 1, Entries())
|
||||
|
||||
fNew, err := GetFn(context.Background(), "mock:/alien", create)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, f, fNew)
|
||||
|
||||
assert.Equal(t, 1, c.Entries())
|
||||
assert.Equal(t, 1, Entries())
|
||||
|
||||
// Check canonicalisation
|
||||
|
||||
@@ -146,7 +146,7 @@ func TestPut(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, f, fNew)
|
||||
|
||||
assert.Equal(t, 1, c.Entries())
|
||||
assert.Equal(t, 1, Entries())
|
||||
|
||||
}
|
||||
|
||||
@@ -170,7 +170,7 @@ func TestClearConfig(t *testing.T) {
|
||||
cleanup, create := mockNewFs(t)
|
||||
defer cleanup()
|
||||
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
assert.Equal(t, 0, Entries())
|
||||
|
||||
_, err := GetFn(context.Background(), "mock:/file.txt", create)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
@@ -190,11 +190,11 @@ func TestClear(t *testing.T) {
|
||||
_, err := GetFn(context.Background(), "mock:/", create)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 1, c.Entries())
|
||||
assert.Equal(t, 1, Entries())
|
||||
|
||||
Clear()
|
||||
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
assert.Equal(t, 0, Entries())
|
||||
}
|
||||
|
||||
func TestEntries(t *testing.T) {
|
||||
|
||||
@@ -123,6 +123,8 @@ type ConfigInfo struct {
RefreshTimes bool
NoConsole bool
TrafficClass uint8
FsCacheExpireDuration time.Duration
FsCacheExpireInterval time.Duration
}

// NewConfig creates a new config with everything set to the default
@@ -160,6 +162,8 @@ func NewConfig() *ConfigInfo {
c.MultiThreadStreams = 4

c.TrackRenamesStrategy = "hash"
c.FsCacheExpireDuration = 300 * time.Second
c.FsCacheExpireInterval = 60 * time.Second

return c
}

@@ -2,9 +2,11 @@ package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
)
|
||||
|
||||
// Authorize is for remote authorization of headless machines.
|
||||
@@ -16,33 +18,61 @@ import (
|
||||
func Authorize(ctx context.Context, args []string, noAutoBrowser bool) error {
|
||||
ctx = suppressConfirm(ctx)
|
||||
switch len(args) {
|
||||
case 1, 3:
|
||||
case 1, 2, 3:
|
||||
default:
|
||||
return errors.Errorf("invalid number of arguments: %d", len(args))
|
||||
}
|
||||
newType := args[0]
|
||||
f := fs.MustFind(newType)
|
||||
if f.Config == nil {
|
||||
return errors.Errorf("can't authorize fs %q", newType)
|
||||
Type := args[0] // FIXME could read this from input
|
||||
ri, err := fs.Find(Type)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ri.Config == nil {
|
||||
return errors.Errorf("can't authorize fs %q", Type)
|
||||
}
|
||||
// Name used for temporary fs
|
||||
name := "**temp-fs**"
|
||||
|
||||
// Make sure we delete it
|
||||
defer DeleteRemote(name)
|
||||
// Config map for remote
|
||||
inM := configmap.Simple{}
|
||||
|
||||
// Indicate that we are running rclone authorize
|
||||
Data.SetValue(name, ConfigAuthorize, "true")
|
||||
inM[ConfigAuthorize] = "true"
|
||||
if noAutoBrowser {
|
||||
Data.SetValue(name, ConfigAuthNoBrowser, "true")
|
||||
inM[ConfigAuthNoBrowser] = "true"
|
||||
}
|
||||
|
||||
if len(args) == 3 {
|
||||
Data.SetValue(name, ConfigClientID, args[1])
|
||||
Data.SetValue(name, ConfigClientSecret, args[2])
|
||||
// Add extra parameters if supplied
|
||||
if len(args) == 2 {
|
||||
err := inM.Decode(args[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if len(args) == 3 {
|
||||
inM[ConfigClientID] = args[1]
|
||||
inM[ConfigClientSecret] = args[2]
|
||||
}
|
||||
|
||||
m := fs.ConfigMap(f, name, nil)
|
||||
f.Config(ctx, name, m)
|
||||
// Name used for temporary remote
|
||||
name := "**temp-fs**"
|
||||
|
||||
m := fs.ConfigMap(ri, name, inM)
|
||||
outM := configmap.Simple{}
|
||||
m.ClearSetters()
|
||||
m.AddSetter(outM)
|
||||
m.AddGetter(outM, configmap.PriorityNormal)
|
||||
|
||||
ri.Config(ctx, name, m)
|
||||
|
||||
// Print the code for the user to paste
|
||||
out := outM["token"]
|
||||
|
||||
// If received a config blob, then return one
|
||||
if len(args) == 2 {
|
||||
out, err = outM.Encode()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste\n", out)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
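As an illustrative aside (not part of the diff): with the two-argument form accepted above, the second argument is a base64-encoded JSON blob of extra config parameters, produced and consumed by the Simple.Encode/Decode helpers added to configmap later in this diff. A hedged sketch of building such a blob follows; the parameter names and values are made up for illustration.

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/configmap"
)

func main() {
	// Hypothetical OAuth client details to pass through to the backend.
	in := configmap.Simple{"client_id": "XXX", "client_secret": "YYY"}
	blob, err := in.Encode() // JSON marshalled, then base64.RawStdEncoding encoded
	if err != nil {
		panic(err)
	}
	fmt.Printf("rclone authorize \"drive\" %q\n", blob)
}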
@@ -128,6 +128,8 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &ci.RefreshTimes, "refresh-times", "", ci.RefreshTimes, "Refresh the modtime of remote files.")
flags.BoolVarP(flagSet, &ci.NoConsole, "no-console", "", ci.NoConsole, "Hide console window. Supported on Windows only.")
flags.StringVarP(flagSet, &dscp, "dscp", "", "", "Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21.")
flags.DurationVarP(flagSet, &ci.FsCacheExpireDuration, "fs-cache-expire-duration", "", ci.FsCacheExpireDuration, "cache remotes for this long (0 to disable caching)")
flags.DurationVarP(flagSet, &ci.FsCacheExpireInterval, "fs-cache-expire-interval", "", ci.FsCacheExpireInterval, "interval to check for expired remotes")
}

// ParseHeaders converts the strings passed in via the header flags into HTTPOptions

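Usage note (not from the diff): the two new flags above would mostly matter for long-running processes, for example something like: rclone rcd --fs-cache-expire-duration 30m --fs-cache-expire-interval 5m. Per the defaults added in config.go above, remotes are otherwise cached for 300s and checked for expiry every 60s.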
@@ -2,8 +2,24 @@
|
||||
package configmap
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"sort"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Priority of getters
|
||||
type Priority int8
|
||||
|
||||
// Priority levels for AddGetter
|
||||
const (
|
||||
PriorityNormal Priority = iota
|
||||
PriorityConfig // use for reading from the config
|
||||
PriorityDefault // use for default values
|
||||
PriorityMax
|
||||
)
|
||||
|
||||
// Getter provides an interface to get config items
|
||||
@@ -29,9 +45,13 @@ type Mapper interface {
|
||||
// Map provides a wrapper around multiple Setter and
|
||||
// Getter interfaces.
|
||||
type Map struct {
|
||||
setters []Setter
|
||||
getters []Getter
|
||||
override []Getter
|
||||
setters []Setter
|
||||
getters []getprio
|
||||
}
|
||||
|
||||
type getprio struct {
|
||||
getter Getter
|
||||
priority Priority
|
||||
}
|
||||
|
||||
// New returns an empty Map
|
||||
@@ -39,18 +59,12 @@ func New() *Map {
|
||||
return &Map{}
|
||||
}
|
||||
|
||||
// AddGetter appends a getter onto the end of the getters
|
||||
func (c *Map) AddGetter(getter Getter) *Map {
|
||||
c.getters = append(c.getters, getter)
|
||||
return c
|
||||
}
|
||||
|
||||
// AddOverrideGetter appends a getter onto the end of the getters
|
||||
//
|
||||
// It also appends it onto the override getters for GetOverride
|
||||
func (c *Map) AddOverrideGetter(getter Getter) *Map {
|
||||
c.getters = append(c.getters, getter)
|
||||
c.override = append(c.override, getter)
|
||||
// AddGetter appends a getter onto the end of the getters in priority order
|
||||
func (c *Map) AddGetter(getter Getter, priority Priority) *Map {
|
||||
c.getters = append(c.getters, getprio{getter, priority})
|
||||
sort.SliceStable(c.getters, func(i, j int) bool {
|
||||
return c.getters[i].priority < c.getters[j].priority
|
||||
})
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -60,12 +74,34 @@ func (c *Map) AddSetter(setter Setter) *Map {
|
||||
return c
|
||||
}
|
||||
|
||||
// get gets an item with the key passed in and return the value from
|
||||
// the first getter. If the item is found then it returns true,
|
||||
// otherwise false.
|
||||
func (c *Map) get(key string, getters []Getter) (value string, ok bool) {
|
||||
for _, do := range getters {
|
||||
value, ok = do.Get(key)
|
||||
// ClearSetters removes all the setters set so far
|
||||
func (c *Map) ClearSetters() *Map {
|
||||
c.setters = nil
|
||||
return c
|
||||
}
|
||||
|
||||
// ClearGetters removes all the getters with the priority given
|
||||
func (c *Map) ClearGetters(priority Priority) *Map {
|
||||
getters := c.getters[:0]
|
||||
for _, item := range c.getters {
|
||||
if item.priority != priority {
|
||||
getters = append(getters, item)
|
||||
}
|
||||
}
|
||||
c.getters = getters
|
||||
return c
|
||||
}
|
||||
|
||||
// GetPriority gets an item with the key passed in and return the
|
||||
// value from the first getter to return a result with priority <=
|
||||
// maxPriority. If the item is found then it returns true, otherwise
|
||||
// false.
|
||||
func (c *Map) GetPriority(key string, maxPriority Priority) (value string, ok bool) {
|
||||
for _, item := range c.getters {
|
||||
if item.priority > maxPriority {
|
||||
break
|
||||
}
|
||||
value, ok = item.getter.Get(key)
|
||||
if ok {
|
||||
return value, ok
|
||||
}
|
||||
@@ -77,14 +113,7 @@ func (c *Map) get(key string, getters []Getter) (value string, ok bool) {
|
||||
// the first getter. If the item is found then it returns true,
|
||||
// otherwise false.
|
||||
func (c *Map) Get(key string) (value string, ok bool) {
|
||||
return c.get(key, c.getters)
|
||||
}
|
||||
|
||||
// GetOverride gets an item with the key passed in and return the
|
||||
// value from the first override getter. If the item is found then it
|
||||
// returns true, otherwise false.
|
||||
func (c *Map) GetOverride(key string) (value string, ok bool) {
|
||||
return c.get(key, c.override)
|
||||
return c.GetPriority(key, PriorityMax)
|
||||
}
|
||||
|
||||
// Set sets an item into all the stored setters.
|
||||
@@ -135,3 +164,38 @@ func (c Simple) String() string {
|
||||
}
|
||||
return out.String()
|
||||
}
|
||||
|
||||
// Encode from c into a string suitable for putting on the command line
|
||||
func (c Simple) Encode() (string, error) {
|
||||
if len(c) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
buf, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "encode simple map")
|
||||
}
|
||||
return base64.RawStdEncoding.EncodeToString(buf), nil
|
||||
}
|
||||
|
||||
// Decode an Encode~d string in into c
|
||||
func (c Simple) Decode(in string) error {
|
||||
// Remove all whitespace from the input string
|
||||
in = strings.Map(func(r rune) rune {
|
||||
if unicode.IsSpace(r) {
|
||||
return -1
|
||||
}
|
||||
return r
|
||||
}, in)
|
||||
if len(in) == 0 {
|
||||
return nil
|
||||
}
|
||||
decodedM, err := base64.RawStdEncoding.DecodeString(in)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "decode simple map")
|
||||
}
|
||||
err = json.Unmarshal(decodedM, &c)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "parse simple map")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
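For orientation (not part of the diff): a small sketch of how the priority-ordered getters above resolve values. The three Simple maps stand in for the flag/connection-string layer, the config file and the option defaults that fs.ConfigMap wires up further down; the keys and values are illustrative only.

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/configmap"
)

func main() {
	flags := configmap.Simple{"chunk_size": "64M"}                              // e.g. flags / connection string
	file := configmap.Simple{"chunk_size": "16M", "user": "bob"}                // e.g. the config file
	defaults := configmap.Simple{"chunk_size": "8M", "user": "", "port": "22"}  // e.g. option defaults

	m := configmap.New()
	m.AddGetter(flags, configmap.PriorityNormal)
	m.AddGetter(file, configmap.PriorityConfig)
	m.AddGetter(defaults, configmap.PriorityDefault)

	v, _ := m.Get("chunk_size")                              // "64M": the lowest priority number is consulted first
	o, ok := m.GetPriority("user", configmap.PriorityNormal) // "", false: "user" is only set at PriorityConfig
	fmt.Println(v, o, ok)
}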
@@ -1,9 +1,11 @@
|
||||
package configmap
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -27,7 +29,7 @@ func TestConfigMapGet(t *testing.T) {
|
||||
"config1": "one",
|
||||
}
|
||||
|
||||
m.AddGetter(m1)
|
||||
m.AddGetter(m1, PriorityNormal)
|
||||
|
||||
value, found = m.Get("config1")
|
||||
assert.Equal(t, "one", value)
|
||||
@@ -42,7 +44,7 @@ func TestConfigMapGet(t *testing.T) {
|
||||
"config2": "two2",
|
||||
}
|
||||
|
||||
m.AddGetter(m2)
|
||||
m.AddGetter(m2, PriorityNormal)
|
||||
|
||||
value, found = m.Get("config1")
|
||||
assert.Equal(t, "one", value)
|
||||
@@ -88,56 +90,160 @@ func TestConfigMapSet(t *testing.T) {
|
||||
"config1": "beetroot",
|
||||
"config2": "potato",
|
||||
}, m2)
|
||||
|
||||
m.ClearSetters()
|
||||
|
||||
// Check that nothing gets set
|
||||
m.Set("config1", "BEETROOT")
|
||||
|
||||
assert.Equal(t, Simple{
|
||||
"config1": "beetroot",
|
||||
"config2": "potato",
|
||||
}, m1)
|
||||
assert.Equal(t, Simple{
|
||||
"config1": "beetroot",
|
||||
"config2": "potato",
|
||||
}, m2)
|
||||
|
||||
}
|
||||
|
||||
func TestConfigMapGetOverride(t *testing.T) {
|
||||
func TestConfigMapGetPriority(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
value, found := m.GetOverride("config1")
|
||||
value, found := m.GetPriority("config1", PriorityMax)
|
||||
assert.Equal(t, "", value)
|
||||
assert.Equal(t, false, found)
|
||||
|
||||
value, found = m.GetOverride("config2")
|
||||
value, found = m.GetPriority("config2", PriorityMax)
|
||||
assert.Equal(t, "", value)
|
||||
assert.Equal(t, false, found)
|
||||
|
||||
m1 := Simple{
|
||||
"config1": "one",
|
||||
"config3": "three",
|
||||
}
|
||||
|
||||
m.AddOverrideGetter(m1)
|
||||
m.AddGetter(m1, PriorityConfig)
|
||||
|
||||
value, found = m.GetOverride("config1")
|
||||
value, found = m.GetPriority("config1", PriorityNormal)
|
||||
assert.Equal(t, "", value)
|
||||
assert.Equal(t, false, found)
|
||||
|
||||
value, found = m.GetPriority("config2", PriorityNormal)
|
||||
assert.Equal(t, "", value)
|
||||
assert.Equal(t, false, found)
|
||||
|
||||
value, found = m.GetPriority("config3", PriorityNormal)
|
||||
assert.Equal(t, "", value)
|
||||
assert.Equal(t, false, found)
|
||||
|
||||
value, found = m.GetPriority("config1", PriorityConfig)
|
||||
assert.Equal(t, "one", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.GetOverride("config2")
|
||||
value, found = m.GetPriority("config2", PriorityConfig)
|
||||
assert.Equal(t, "", value)
|
||||
assert.Equal(t, false, found)
|
||||
|
||||
value, found = m.GetPriority("config3", PriorityConfig)
|
||||
assert.Equal(t, "three", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.GetPriority("config1", PriorityMax)
|
||||
assert.Equal(t, "one", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.GetPriority("config2", PriorityMax)
|
||||
assert.Equal(t, "", value)
|
||||
assert.Equal(t, false, found)
|
||||
|
||||
value, found = m.GetPriority("config3", PriorityMax)
|
||||
assert.Equal(t, "three", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
m2 := Simple{
|
||||
"config1": "one2",
|
||||
"config2": "two2",
|
||||
}
|
||||
|
||||
m.AddGetter(m2)
|
||||
m.AddGetter(m2, PriorityNormal)
|
||||
|
||||
value, found = m.GetOverride("config1")
|
||||
assert.Equal(t, "one", value)
|
||||
value, found = m.GetPriority("config1", PriorityNormal)
|
||||
assert.Equal(t, "one2", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.GetOverride("config2")
|
||||
assert.Equal(t, "", value)
|
||||
assert.Equal(t, false, found)
|
||||
|
||||
value, found = m.Get("config1")
|
||||
assert.Equal(t, "one", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.Get("config2")
|
||||
value, found = m.GetPriority("config2", PriorityNormal)
|
||||
assert.Equal(t, "two2", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.GetPriority("config3", PriorityNormal)
|
||||
assert.Equal(t, "", value)
|
||||
assert.Equal(t, false, found)
|
||||
|
||||
value, found = m.GetPriority("config1", PriorityConfig)
|
||||
assert.Equal(t, "one2", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.GetPriority("config2", PriorityConfig)
|
||||
assert.Equal(t, "two2", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.GetPriority("config3", PriorityConfig)
|
||||
assert.Equal(t, "three", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.GetPriority("config1", PriorityMax)
|
||||
assert.Equal(t, "one2", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.GetPriority("config2", PriorityMax)
|
||||
assert.Equal(t, "two2", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.GetPriority("config3", PriorityMax)
|
||||
assert.Equal(t, "three", value)
|
||||
assert.Equal(t, true, found)
|
||||
}
|
||||
|
||||
func TestConfigMapClearGetters(t *testing.T) {
|
||||
m := New()
|
||||
m1 := Simple{}
|
||||
m2 := Simple{}
|
||||
m3 := Simple{}
|
||||
m.AddGetter(m1, PriorityNormal)
|
||||
m.AddGetter(m2, PriorityDefault)
|
||||
m.AddGetter(m3, PriorityConfig)
|
||||
assert.Equal(t, []getprio{
|
||||
{m1, PriorityNormal},
|
||||
{m3, PriorityConfig},
|
||||
{m2, PriorityDefault},
|
||||
}, m.getters)
|
||||
m.ClearGetters(PriorityConfig)
|
||||
assert.Equal(t, []getprio{
|
||||
{m1, PriorityNormal},
|
||||
{m2, PriorityDefault},
|
||||
}, m.getters)
|
||||
m.ClearGetters(PriorityNormal)
|
||||
assert.Equal(t, []getprio{
|
||||
{m2, PriorityDefault},
|
||||
}, m.getters)
|
||||
m.ClearGetters(PriorityDefault)
|
||||
assert.Equal(t, []getprio{}, m.getters)
|
||||
m.ClearGetters(PriorityDefault)
|
||||
assert.Equal(t, []getprio{}, m.getters)
|
||||
}
|
||||
|
||||
func TestConfigMapClearSetters(t *testing.T) {
|
||||
m := New()
|
||||
m1 := Simple{}
|
||||
m2 := Simple{}
|
||||
m3 := Simple{}
|
||||
m.AddSetter(m1)
|
||||
m.AddSetter(m2)
|
||||
m.AddSetter(m3)
|
||||
assert.Equal(t, []Setter{m1, m2, m3}, m.setters)
|
||||
m.ClearSetters()
|
||||
assert.Equal(t, []Setter(nil), m.setters)
|
||||
}
|
||||
|
||||
func TestSimpleString(t *testing.T) {
|
||||
@@ -163,3 +269,91 @@ func TestSimpleString(t *testing.T) {
|
||||
"apple": "",
|
||||
}.String())
|
||||
}
|
||||
|
||||
func TestSimpleEncode(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in Simple
|
||||
want string
|
||||
}{
|
||||
{
|
||||
in: Simple{},
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
in: Simple{
|
||||
"one": "potato",
|
||||
},
|
||||
want: "eyJvbmUiOiJwb3RhdG8ifQ",
|
||||
},
|
||||
{
|
||||
in: Simple{
|
||||
"one": "potato",
|
||||
"two": "",
|
||||
},
|
||||
want: "eyJvbmUiOiJwb3RhdG8iLCJ0d28iOiIifQ",
|
||||
},
|
||||
} {
|
||||
got, err := test.in.Encode()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.want, got)
|
||||
gotM := Simple{}
|
||||
err = gotM.Decode(got)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.in, gotM)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleDecode(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want Simple
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
in: "",
|
||||
want: Simple{},
|
||||
},
|
||||
{
|
||||
in: "eyJvbmUiOiJwb3RhdG8ifQ",
|
||||
want: Simple{
|
||||
"one": "potato",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: " e yJvbm UiOiJwb\r\n 3Rhd\tG8ifQ\n\n ",
|
||||
want: Simple{
|
||||
"one": "potato",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "eyJvbmUiOiJwb3RhdG8iLCJ0d28iOiIifQ",
|
||||
want: Simple{
|
||||
"one": "potato",
|
||||
"two": "",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "!!!!!",
|
||||
want: Simple{},
|
||||
wantErr: "decode simple map",
|
||||
},
|
||||
{
|
||||
in: base64.RawStdEncoding.EncodeToString([]byte(`null`)),
|
||||
want: Simple{},
|
||||
},
|
||||
{
|
||||
in: base64.RawStdEncoding.EncodeToString([]byte(`rubbish`)),
|
||||
want: Simple{},
|
||||
wantErr: "parse simple map",
|
||||
},
|
||||
} {
|
||||
got := Simple{}
|
||||
err := got.Decode(test.in)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
if test.wantErr == "" {
|
||||
require.NoError(t, err, test.in)
|
||||
} else {
|
||||
assert.Contains(t, err.Error(), test.wantErr, test.in)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
56
fs/fs.go
@@ -125,6 +125,38 @@ func (os Options) Get(name string) *Option {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Overridden discovers which config items have been overridden in the
|
||||
// configmap passed in, either by the config string, command line
|
||||
// flags or environment variables
|
||||
func (os Options) Overridden(m *configmap.Map) configmap.Simple {
|
||||
var overridden = configmap.Simple{}
|
||||
for i := range os {
|
||||
opt := &os[i]
|
||||
value, isSet := m.GetPriority(opt.Name, configmap.PriorityNormal)
|
||||
if isSet {
|
||||
overridden.Set(opt.Name, value)
|
||||
}
|
||||
}
|
||||
return overridden
|
||||
}
|
||||
|
||||
// NonDefault discovers which config values aren't at their default
|
||||
func (os Options) NonDefault(m configmap.Getter) configmap.Simple {
|
||||
var nonDefault = configmap.Simple{}
|
||||
for i := range os {
|
||||
opt := &os[i]
|
||||
value, isSet := m.Get(opt.Name)
|
||||
if !isSet {
|
||||
continue
|
||||
}
|
||||
defaultValue := fmt.Sprint(opt.Default)
|
||||
if value != defaultValue {
|
||||
nonDefault.Set(opt.Name, value)
|
||||
}
|
||||
}
|
||||
return nonDefault
|
||||
}
|
||||
|
||||
// OptionVisibility controls whether the options are visible in the
|
||||
// configurator or the command line.
|
||||
type OptionVisibility byte
|
||||
@@ -1321,28 +1353,28 @@ func ConfigMap(fsInfo *RegInfo, configName string, connectionStringConfig config
|
||||
|
||||
// Config from connection string
|
||||
if len(connectionStringConfig) > 0 {
|
||||
config.AddOverrideGetter(connectionStringConfig)
|
||||
config.AddGetter(connectionStringConfig, configmap.PriorityNormal)
|
||||
}
|
||||
|
||||
// flag values
|
||||
if fsInfo != nil {
|
||||
config.AddOverrideGetter(®InfoValues{fsInfo, false})
|
||||
config.AddGetter(®InfoValues{fsInfo, false}, configmap.PriorityNormal)
|
||||
}
|
||||
|
||||
// remote specific environment vars
|
||||
config.AddOverrideGetter(configEnvVars(configName))
|
||||
config.AddGetter(configEnvVars(configName), configmap.PriorityNormal)
|
||||
|
||||
// backend specific environment vars
|
||||
if fsInfo != nil {
|
||||
config.AddOverrideGetter(optionEnvVars{fsInfo: fsInfo})
|
||||
config.AddGetter(optionEnvVars{fsInfo: fsInfo}, configmap.PriorityNormal)
|
||||
}
|
||||
|
||||
// config file
|
||||
config.AddGetter(getConfigFile(configName))
|
||||
config.AddGetter(getConfigFile(configName), configmap.PriorityConfig)
|
||||
|
||||
// default values
|
||||
if fsInfo != nil {
|
||||
config.AddGetter(®InfoValues{fsInfo, true})
|
||||
config.AddGetter(®InfoValues{fsInfo, true}, configmap.PriorityDefault)
|
||||
}
|
||||
|
||||
// Set Config
|
||||
@@ -1381,17 +1413,7 @@ func NewFs(ctx context.Context, path string) (Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Now discover which config items have been overridden,
|
||||
// either by the config string, command line flags or
|
||||
// environment variables
|
||||
var overridden = configmap.Simple{}
|
||||
for i := range fsInfo.Options {
|
||||
opt := &fsInfo.Options[i]
|
||||
value, isSet := config.GetOverride(opt.Name)
|
||||
if isSet {
|
||||
overridden.Set(opt.Name, value)
|
||||
}
|
||||
}
|
||||
overridden := fsInfo.Options.Overridden(config)
|
||||
if len(overridden) > 0 {
|
||||
extraConfig := overridden.String()
|
||||
//Debugf(nil, "detected overriden config %q", extraConfig)
|
||||
|
||||
@@ -190,6 +190,43 @@ func TestOptionsGet(t *testing.T) {
|
||||
assert.Nil(t, opt)
|
||||
}
|
||||
|
||||
func TestOptionsOveridden(t *testing.T) {
|
||||
m := configmap.New()
|
||||
m1 := configmap.Simple{
|
||||
"nounc": "m1",
|
||||
"copy_links": "m1",
|
||||
}
|
||||
m.AddGetter(m1, configmap.PriorityNormal)
|
||||
m2 := configmap.Simple{
|
||||
"nounc": "m2",
|
||||
"case_insensitive": "m2",
|
||||
}
|
||||
m.AddGetter(m2, configmap.PriorityConfig)
|
||||
m3 := configmap.Simple{
|
||||
"nounc": "m3",
|
||||
}
|
||||
m.AddGetter(m3, configmap.PriorityDefault)
|
||||
got := testOptions.Overridden(m)
|
||||
assert.Equal(t, configmap.Simple{
|
||||
"copy_links": "m1",
|
||||
"nounc": "m1",
|
||||
}, got)
|
||||
}
|
||||
|
||||
func TestOptionsNonDefault(t *testing.T) {
|
||||
m := configmap.Simple{}
|
||||
got := testOptions.NonDefault(m)
|
||||
assert.Equal(t, configmap.Simple{}, got)
|
||||
|
||||
m["case_insensitive"] = "false"
|
||||
got = testOptions.NonDefault(m)
|
||||
assert.Equal(t, configmap.Simple{}, got)
|
||||
|
||||
m["case_insensitive"] = "true"
|
||||
got = testOptions.NonDefault(m)
|
||||
assert.Equal(t, configmap.Simple{"case_insensitive": "true"}, got)
|
||||
}
|
||||
|
||||
func TestOptionMarshalJSON(t *testing.T) {
|
||||
out, err := json.MarshalIndent(&caseInsensitiveOption, "", "")
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -4,21 +4,64 @@ package rc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
)
|
||||
|
||||
// GetFsNamed gets an fs.Fs named fsName either from the cache or creates it afresh
|
||||
func GetFsNamed(ctx context.Context, in Params, fsName string) (f fs.Fs, err error) {
|
||||
fsString, err := in.GetString(fsName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if !IsErrParamInvalid(err) {
|
||||
return nil, err
|
||||
}
|
||||
fsString, err = getConfigMap(in, fsName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return cache.Get(ctx, fsString)
|
||||
}
|
||||
|
||||
// getConfigMap gets the config as a map from in and converts it to a
|
||||
// config string
|
||||
//
|
||||
// It uses the special parameters _name to name the remote and _root
|
||||
// to make the root of the remote.
|
||||
func getConfigMap(in Params, fsName string) (fsString string, err error) {
|
||||
var m configmap.Simple
|
||||
err = in.GetStruct(fsName, &m)
|
||||
if err != nil {
|
||||
return fsString, err
|
||||
}
|
||||
pop := func(key string) string {
|
||||
value := m[key]
|
||||
delete(m, key)
|
||||
return value
|
||||
}
|
||||
Type := pop("type")
|
||||
name := pop("_name")
|
||||
root := pop("_root")
|
||||
if name != "" {
|
||||
fsString = name
|
||||
} else if Type != "" {
|
||||
fsString = ":" + Type
|
||||
} else {
|
||||
return fsString, errors.New(`couldn't find "type" or "_name" in JSON config definition`)
|
||||
}
|
||||
config := m.String()
|
||||
if config != "" {
|
||||
fsString += ","
|
||||
fsString += config
|
||||
}
|
||||
fsString += ":"
|
||||
fsString += root
|
||||
return fsString, nil
|
||||
}
|
||||
|
||||
// GetFs gets an fs.Fs named "fs" either from the cache or creates it afresh
|
||||
func GetFs(ctx context.Context, in Params) (f fs.Fs, err error) {
|
||||
return GetFsNamed(ctx, in, "fs")
|
||||
|
||||
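As an illustrative aside (not part of the diff): with getConfigMap above, an rc call can describe the fs as a JSON-style object rather than a connection string. A hedged sketch of the equivalence, with made-up backend parameters and assuming the sftp backend is compiled in:

package main

import (
	"context"
	"fmt"

	_ "github.com/rclone/rclone/backend/sftp" // assumption: sftp backend registered
	"github.com/rclone/rclone/fs/rc"
)

func main() {
	in := rc.Params{
		"fs": rc.Params{
			"type":  "sftp",
			"host":  "example.com", // hypothetical parameter; a real host would be needed to run this
			"_root": "/path/to/dir",
		},
	}
	// getConfigMap converts this to the connection string
	//   :sftp,host='example.com':/path/to/dir
	// before handing it to the fs cache.
	f, err := rc.GetFs(context.Background(), in)
	fmt.Println(f, err)
}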
@@ -2,6 +2,7 @@ package rc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
@@ -13,6 +14,8 @@ import (
|
||||
func mockNewFs(t *testing.T) func() {
|
||||
f := mockfs.NewFs(context.Background(), "mock", "mock")
|
||||
cache.Put("/", f)
|
||||
cache.Put("mock:/", f)
|
||||
cache.Put(":mock:/", f)
|
||||
return func() {
|
||||
cache.Clear()
|
||||
}
|
||||
@@ -36,6 +39,98 @@ func TestGetFsNamed(t *testing.T) {
|
||||
assert.Nil(t, f)
|
||||
}
|
||||
|
||||
func TestGetFsNamedStruct(t *testing.T) {
|
||||
defer mockNewFs(t)()
|
||||
|
||||
in := Params{
|
||||
"potato": Params{
|
||||
"type": "mock",
|
||||
"_root": "/",
|
||||
},
|
||||
}
|
||||
f, err := GetFsNamed(context.Background(), in, "potato")
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, f)
|
||||
|
||||
in = Params{
|
||||
"potato": Params{
|
||||
"_name": "mock",
|
||||
"_root": "/",
|
||||
},
|
||||
}
|
||||
f, err = GetFsNamed(context.Background(), in, "potato")
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, f)
|
||||
}
|
||||
|
||||
func TestGetConfigMap(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in Params
|
||||
fsName string
|
||||
wantFsString string
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
in: Params{
|
||||
"Fs": Params{},
|
||||
},
|
||||
fsName: "Fs",
|
||||
wantErr: `couldn't find "type" or "_name" in JSON config definition`,
|
||||
},
|
||||
{
|
||||
in: Params{
|
||||
"Fs": Params{
|
||||
"notastring": true,
|
||||
},
|
||||
},
|
||||
fsName: "Fs",
|
||||
wantErr: `cannot unmarshal bool`,
|
||||
},
|
||||
{
|
||||
in: Params{
|
||||
"Fs": Params{
|
||||
"_name": "potato",
|
||||
},
|
||||
},
|
||||
fsName: "Fs",
|
||||
wantFsString: "potato:",
|
||||
},
|
||||
{
|
||||
in: Params{
|
||||
"Fs": Params{
|
||||
"type": "potato",
|
||||
},
|
||||
},
|
||||
fsName: "Fs",
|
||||
wantFsString: ":potato:",
|
||||
},
|
||||
{
|
||||
in: Params{
|
||||
"Fs": Params{
|
||||
"type": "sftp",
|
||||
"_name": "potato",
|
||||
"parameter": "42",
|
||||
"parameter2": "true",
|
||||
"_root": "/path/to/somewhere",
|
||||
},
|
||||
},
|
||||
fsName: "Fs",
|
||||
wantFsString: "potato,parameter='42',parameter2='true':/path/to/somewhere",
|
||||
},
|
||||
} {
|
||||
gotFsString, gotErr := getConfigMap(test.in, test.fsName)
|
||||
what := fmt.Sprintf("%+v", test.in)
|
||||
assert.Equal(t, test.wantFsString, gotFsString, what)
|
||||
if test.wantErr == "" {
|
||||
assert.NoError(t, gotErr)
|
||||
} else {
|
||||
require.Error(t, gotErr)
|
||||
assert.Contains(t, gotErr.Error(), test.wantErr)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetFs(t *testing.T) {
|
||||
defer mockNewFs(t)()
|
||||
|
||||
|
||||
@@ -279,3 +279,26 @@ func (p Params) GetDuration(key string) (time.Duration, error) {
|
||||
}
|
||||
return duration, nil
|
||||
}
|
||||
|
||||
// Error creates the standard response for an errored rc call using an
|
||||
// rc.Param from a path, input Params, error and a suggested HTTP
|
||||
// response code.
|
||||
//
|
||||
// It returns a Params and an updated status code
|
||||
func Error(path string, in Params, err error, status int) (Params, int) {
|
||||
// Adjust the status code for some well known errors
|
||||
errOrig := errors.Cause(err)
|
||||
switch {
|
||||
case errOrig == fs.ErrorDirNotFound || errOrig == fs.ErrorObjectNotFound:
|
||||
status = http.StatusNotFound
|
||||
case IsErrParamInvalid(err) || IsErrParamNotFound(err):
|
||||
status = http.StatusBadRequest
|
||||
}
|
||||
result := Params{
|
||||
"status": status,
|
||||
"error": err.Error(),
|
||||
"input": in,
|
||||
"path": path,
|
||||
}
|
||||
return result, status
|
||||
}
|
||||
|
||||
@@ -169,21 +169,9 @@ func (s *Server) Serve() error {
|
||||
// writeError writes a formatted error to the output
|
||||
func writeError(path string, in rc.Params, w http.ResponseWriter, err error, status int) {
|
||||
fs.Errorf(nil, "rc: %q: error: %v", path, err)
|
||||
// Adjust the error return for some well known errors
|
||||
errOrig := errors.Cause(err)
|
||||
switch {
|
||||
case errOrig == fs.ErrorDirNotFound || errOrig == fs.ErrorObjectNotFound:
|
||||
status = http.StatusNotFound
|
||||
case rc.IsErrParamInvalid(err) || rc.IsErrParamNotFound(err):
|
||||
status = http.StatusBadRequest
|
||||
}
|
||||
params, status := rc.Error(path, in, err, status)
|
||||
w.WriteHeader(status)
|
||||
err = rc.WriteJSON(w, rc.Params{
|
||||
"status": status,
|
||||
"error": err.Error(),
|
||||
"input": in,
|
||||
"path": path,
|
||||
})
|
||||
err = rc.WriteJSON(w, params)
|
||||
if err != nil {
|
||||
// can't return the error at this point
|
||||
fs.Errorf(nil, "rc: writeError: failed to write JSON output from %#v: %v", in, err)
|
||||
|
||||
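For illustration (not part of the diff): what the refactored writeError now gets back from rc.Error. The path and input here are made up; the status adjustment for fs.ErrorDirNotFound follows the switch in rc.Error above.

// Sketch only ("in" stands for the original request's rc.Params):
params, status := rc.Error("operations/list", in, fs.ErrorDirNotFound, http.StatusInternalServerError)
// status is adjusted to http.StatusNotFound, and params contains
//   {"status": 404, "error": "directory not found", "input": in, "path": "operations/list"}
// ready to be written out with rc.WriteJSON.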
@@ -155,7 +155,9 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
|
||||
// Input context - cancel this for graceful stop
|
||||
s.inCtx, s.inCancel = context.WithCancel(s.ctx)
|
||||
if s.noTraverse && s.deleteMode != fs.DeleteModeOff {
|
||||
fs.Errorf(nil, "Ignoring --no-traverse with sync")
|
||||
if !fi.HaveFilesFrom() {
|
||||
fs.Errorf(nil, "Ignoring --no-traverse with sync")
|
||||
}
|
||||
s.noTraverse = false
|
||||
}
|
||||
s.trackRenamesStrategy, err = parseTrackRenamesStrategy(ci.TrackRenamesStrategy)
|
||||
@@ -264,6 +266,9 @@ func (s *syncCopyMove) processError(err error) {
|
||||
// Cancel the march and stop the pipes
|
||||
s.inCancel()
|
||||
}
|
||||
} else if err == context.Canceled && s.inCtx.Err() != nil {
|
||||
// Ignore context Canceled if we have called s.inCancel()
|
||||
return
|
||||
}
|
||||
s.errorMu.Lock()
|
||||
defer s.errorMu.Unlock()
|
||||
|
||||
@@ -1,4 +1,4 @@
package fs

// Version of rclone
var Version = "v1.55.0-DEV"
var Version = "v1.56.0-DEV"

@@ -12,6 +12,7 @@ Make TesTrun have a []string of flags to try - that then makes it generic
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"log"
|
||||
"math/rand"
|
||||
@@ -22,6 +23,7 @@ import (
|
||||
"time"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/all" // import all fs
|
||||
"github.com/rclone/rclone/fs/config/configfile"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
)
|
||||
|
||||
@@ -70,6 +72,7 @@ func main() {
|
||||
log.Println("test_all should be run from the root of the rclone source code")
|
||||
log.Fatal(err)
|
||||
}
|
||||
configfile.LoadConfig(context.Background())
|
||||
|
||||
// Seed the random number generator
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
|
||||
3
go.mod
@@ -45,7 +45,7 @@ require (
|
||||
github.com/nsf/termbox-go v0.0.0-20210114135735-d04385b850e8
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pkg/sftp v1.13.0
|
||||
github.com/pkg/sftp v1.12.0
|
||||
github.com/prometheus/client_golang v1.9.0
|
||||
github.com/prometheus/procfs v0.3.0 // indirect
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
|
||||
@@ -53,6 +53,7 @@ require (
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sevlyar/go-daemon v0.1.5
|
||||
github.com/shirou/gopsutil/v3 v3.21.3
|
||||
github.com/sirupsen/logrus v1.7.0
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
|
||||
github.com/spf13/cobra v1.1.1
|
||||
|
||||
85
go.sum
@@ -11,7 +11,6 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
@@ -45,7 +44,6 @@ github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9
|
||||
github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.2 h1:Aze/GQeAN1RRbGmnUJvUj+tFGBzFdIg3293/A9rbxC4=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.10 h1:r6fZHMaHD8B6LDCn0o5vyBFHIHrM6Ywwx7mb49lPItI=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
|
||||
@@ -62,7 +60,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
|
||||
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
|
||||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk=
|
||||
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
|
||||
@@ -70,6 +67,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
|
||||
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
|
||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/Unknwon/goconfig v0.0.0-20200908083735-df7de6a44db8 h1:1TrMV1HmBApBbM+Hy7RCKZD6UlYWYIPPfoeXomG7+zE=
|
||||
github.com/Unknwon/goconfig v0.0.0-20200908083735-df7de6a44db8/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
@@ -111,8 +110,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/billziss-gh/cgofuse v1.4.0 h1:kju2jDmdNuDDCrxPob2ggmZr5Mj/odCjU1Y8kx0Th9E=
|
||||
github.com/billziss-gh/cgofuse v1.4.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
|
||||
github.com/billziss-gh/cgofuse v1.5.0 h1:kH516I/s+Ab4diL/Y/ayFeUjjA8ey+JK12xDfBf4HEs=
|
||||
github.com/billziss-gh/cgofuse v1.5.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
@@ -121,7 +118,6 @@ github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2w
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcutil v1.0.3-0.20201124182144-4031bdc69ded h1:WcPFZzCIqGt/TdFJHsOiX5dIlB/MUzrftltMhpjzfA8=
|
||||
github.com/btcsuite/btcutil v1.0.3-0.20201124182144-4031bdc69ded/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
|
||||
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ=
|
||||
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
|
||||
@@ -171,7 +167,6 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
@@ -182,7 +177,6 @@ github.com/dropbox/dropbox-sdk-go-unofficial v1.0.1-0.20210114204226-41fdcdae8a5
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813 h1:NgO45/5mBLRVfiXerEFzH6ikcZ7DNRPS639xFg3ENzU=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
@@ -217,12 +211,13 @@ github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgO
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
|
||||
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
@@ -239,7 +234,6 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@@ -254,7 +248,6 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
@@ -267,10 +260,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
|
||||
@@ -293,9 +284,7 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
|
||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@@ -337,7 +326,6 @@ github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
|
||||
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
@@ -390,7 +378,6 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
@@ -410,7 +397,6 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6
|
||||
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
|
||||
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/koofr/go-httpclient v0.0.0-20200420163713-93aa7c75b348 h1:Lrn8srO9JDBCf2iPjqy62stl49UDwoOxZ9/NGVi+fnk=
|
||||
github.com/koofr/go-httpclient v0.0.0-20200420163713-93aa7c75b348/go.mod h1:JBLy//Q5jzU3XSMxdONTD5EIj1LhTPktosxG2Bw1iho=
|
||||
@@ -419,7 +405,6 @@ github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a/go.mod h1:MRA
|
||||
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
@@ -440,16 +425,13 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
|
||||
github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/minio/minio-go/v6 v6.0.46 h1:waExJtO53xrnsNX//7cSc1h3478wqTryDx4RVD7o26I=
|
||||
github.com/minio/minio-go/v6 v6.0.46/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
|
||||
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
@@ -461,10 +443,8 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
@@ -536,7 +516,6 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
|
||||
github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
|
||||
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
|
||||
@@ -552,7 +531,6 @@ github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
|
||||
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
|
||||
@@ -561,9 +539,7 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
|
||||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.3.0 h1:Uehi/mxLK0eiUc0H0++5tpMGTexB8wZ598MIgU8VpDM=
|
||||
github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
@@ -573,14 +549,12 @@ github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rfjakob/eme v1.1.1 h1:t+CgvcOn+eDvj2xdglxsSnkgg8LM8jwdxnV7OnsrTn0=
|
||||
github.com/rfjakob/eme v1.1.1/go.mod h1:U2bmx0hDj8EyDdcxmD5t3XHDnBFnyNNc22n1R4008eM=
|
||||
github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY=
|
||||
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -590,12 +564,12 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sevlyar/go-daemon v0.1.5 h1:Zy/6jLbM8CfqJ4x4RPr7MJlSKt90f00kNM1D401C+Qk=
|
||||
github.com/sevlyar/go-daemon v0.1.5/go.mod h1:6dJpPatBT9eUwM5VCw9Bt6CdX9Tk6UWvhW3MebLDRKE=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
|
||||
github.com/shirou/gopsutil/v3 v3.21.3 h1:wgcdAHZS2H6qy4JFewVTtqfiYxFzCeEJod/mLztdPG8=
|
||||
github.com/shirou/gopsutil/v3 v3.21.3/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
@@ -604,7 +578,6 @@ github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:s
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
@@ -631,7 +604,6 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3
|
||||
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
@@ -639,7 +611,6 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
|
||||
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
@@ -647,6 +618,10 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8 h1:IGJQmLBLYBdAknj21W3JsVof0yjEXfy1Q0K3YZebDOg=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
|
||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tklauser/go-sysconf v0.3.4 h1:HT8SVixZd3IzLdfs/xlpq0jeSfTX57g1v6wB1EuzV7M=
|
||||
github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek=
|
||||
github.com/tklauser/numcpus v0.2.1 h1:ct88eFm+Q7m2ZfXJdan1xYoXKlmwsfP+k88q05KvlZc=
|
||||
github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
|
||||
@@ -684,32 +659,23 @@ go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.22.6 h1:BdkrbWrzDlV9dnbzoP7sfN+dHheJ4J9JOaYxcUDL+ok=
|
||||
go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM=
|
||||
@@ -730,15 +696,10 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9 h1:umElSU9WZirRdgu2yFHY0ayQkEnKiOC1TtM3fWXFnoU=
|
||||
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
@@ -763,7 +724,6 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
@@ -774,7 +734,6 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=
|
||||
@@ -815,7 +774,6 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
@@ -829,9 +787,7 @@ golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAG
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
@@ -845,9 +801,7 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -906,12 +860,11 @@ golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210313110737-8e9fff1a3a18 h1:jxr7/dEo+rR29uEBoLSWJ1tRHCFAMwFbGUU9nRqzpds=
|
||||
golang.org/x/sys v0.0.0-20210313110737-8e9fff1a3a18/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M=
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
@@ -919,9 +872,7 @@ golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fq
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
@@ -939,7 +890,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
@@ -991,7 +941,6 @@ golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -1009,7 +958,6 @@ google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
@@ -1022,9 +970,7 @@ google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
@@ -1049,7 +995,6 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940 h1:MRHtG0U6SnaUb+s+LhNE1qt1FQ1wlhqr5E4usBKC0uA=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
@@ -1080,10 +1025,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
@@ -1097,7 +1040,6 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
@@ -1106,7 +1048,6 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -1115,7 +1056,6 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk=
|
||||
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
@@ -1127,14 +1067,11 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
@@ -1,7 +0,0 @@
// +build cmount

package buildinfo

func init() {
	Tags = append(Tags, "cmount")
}
lib/buildinfo/osversion.go (new file, 34 lines)
@@ -0,0 +1,34 @@
// +build !openbsd,!windows

package buildinfo

import (
	"strings"

	"github.com/shirou/gopsutil/v3/host"
)

// GetOSVersion returns OS version, kernel and bitness
func GetOSVersion() (osVersion, osKernel string) {
	if platform, _, version, err := host.PlatformInformation(); err == nil && platform != "" {
		osVersion = platform
		if version != "" {
			osVersion += " " + version
		}
	}

	if version, err := host.KernelVersion(); err == nil && version != "" {
		osKernel = version
	}

	if arch, err := host.KernelArch(); err == nil && arch != "" {
		if strings.HasSuffix(arch, "64") && osVersion != "" {
			osVersion += " (64 bit)"
		}
		if osKernel != "" {
			osKernel += " (" + arch + ")"
		}
	}

	return
}
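For orientation, a minimal sketch of how a caller might consume this helper. The calling program and the import path (inferred from the lib/buildinfo location in this repository) are assumptions, not part of the commit:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/buildinfo" // assumed import path for lib/buildinfo
)

func main() {
	// Both values are best effort: either may be empty if gopsutil could not
	// determine the platform or kernel, since errors are ignored above.
	osVersion, osKernel := buildinfo.GetOSVersion()
	fmt.Println("os/version:", osVersion)
	fmt.Println("os/kernel:", osKernel)
}
```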
lib/buildinfo/osversion_openbsd.go (new file, 13 lines)
@@ -0,0 +1,13 @@
// +build openbsd

package buildinfo

// gopsutil v3.21.3 fails to build on openbsd:
// Error: .../go/pkg/mod/github.com/tklauser/go-sysconf@v0.3.4/sysconf_openbsd.go:22:28: undefined: unix.RLIMIT_NPROC
// Error: .../go/pkg/mod/github.com/shirou/gopsutil/v3@v3.21.3/process/process.go:163:15: undefined: pidsWithContext
// and so on...

// GetOSVersion returns OS version, kernel and bitness
func GetOSVersion() (osVersion, osKernel string) {
	return "OpenBSD", ""
}
lib/buildinfo/osversion_windows.go (new file, 131 lines)
@@ -0,0 +1,131 @@
|
||||
// +build !openbsd !windows
|
||||
|
||||
package buildinfo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
"github.com/shirou/gopsutil/v3/host"
|
||||
"golang.org/x/sys/windows"
|
||||
"golang.org/x/sys/windows/registry"
|
||||
)
|
||||
|
||||
// GetOSVersion returns OS version, kernel and bitness
|
||||
// On Windows it performs additional output enhancements.
|
||||
func GetOSVersion() (osVersion, osKernel string) {
|
||||
if platform, _, version, err := host.PlatformInformation(); err == nil && platform != "" {
|
||||
osVersion = platform
|
||||
if version != "" {
|
||||
osVersion += " " + version
|
||||
}
|
||||
}
|
||||
|
||||
if version, err := host.KernelVersion(); err == nil && version != "" {
|
||||
osKernel = version
|
||||
|
||||
// Prevent duplication of output on Windows
|
||||
if strings.Contains(osVersion, osKernel) {
|
||||
deduped := strings.TrimSpace(strings.Replace(osVersion, osKernel, "", 1))
|
||||
if deduped != "" {
|
||||
osVersion = deduped
|
||||
}
|
||||
}
|
||||
|
||||
// Simplify kernel output: `RELEASE.BUILD Build BUILD` -> `RELEASE.BUILD`
|
||||
match := regexp.MustCompile(`^([\d\.]+?\.)(\d+) Build (\d+)$`).FindStringSubmatch(osKernel)
|
||||
if len(match) == 4 && match[2] == match[3] {
|
||||
osKernel = match[1] + match[2]
|
||||
}
|
||||
}
|
||||
|
||||
friendlyName := getRegistryVersionString("ReleaseId")
|
||||
if osVersion != "" && friendlyName != "" {
|
||||
osVersion += " " + friendlyName
|
||||
}
|
||||
|
||||
updateRevision := getRegistryVersionInt("UBR")
|
||||
if osKernel != "" && updateRevision != 0 {
|
||||
osKernel += fmt.Sprintf(".%d", updateRevision)
|
||||
}
|
||||
|
||||
if arch, err := host.KernelArch(); err == nil && arch != "" {
|
||||
if strings.HasSuffix(arch, "64") && osVersion != "" {
|
||||
osVersion += " (64 bit)"
|
||||
}
|
||||
if osKernel != "" {
|
||||
osKernel += " (" + arch + ")"
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
var regVersionKeyUTF16 = windows.StringToUTF16Ptr(`SOFTWARE\Microsoft\Windows NT\CurrentVersion`)
|
||||
|
||||
func getRegistryVersionString(name string) string {
|
||||
var (
|
||||
err error
|
||||
handle windows.Handle
|
||||
bufLen uint32
|
||||
valType uint32
|
||||
)
|
||||
|
||||
err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, regVersionKeyUTF16, 0, windows.KEY_READ|windows.KEY_WOW64_64KEY, &handle)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer func() {
|
||||
_ = windows.RegCloseKey(handle)
|
||||
}()
|
||||
|
||||
nameUTF16 := windows.StringToUTF16Ptr(name)
|
||||
err = windows.RegQueryValueEx(handle, nameUTF16, nil, &valType, nil, &bufLen)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
regBuf := make([]uint16, bufLen/2+1)
|
||||
err = windows.RegQueryValueEx(handle, nameUTF16, nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return windows.UTF16ToString(regBuf[:])
|
||||
}
|
||||
|
||||
func getRegistryVersionInt(name string) int {
|
||||
var (
|
||||
err error
|
||||
handle windows.Handle
|
||||
bufLen uint32
|
||||
valType uint32
|
||||
)
|
||||
|
||||
err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, regVersionKeyUTF16, 0, windows.KEY_READ|windows.KEY_WOW64_64KEY, &handle)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
defer func() {
|
||||
_ = windows.RegCloseKey(handle)
|
||||
}()
|
||||
|
||||
nameUTF16 := windows.StringToUTF16Ptr(name)
|
||||
err = windows.RegQueryValueEx(handle, nameUTF16, nil, &valType, nil, &bufLen)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
if valType != registry.DWORD || bufLen != 4 {
|
||||
return 0
|
||||
}
|
||||
var val32 uint32
|
||||
err = windows.RegQueryValueEx(handle, nameUTF16, nil, &valType, (*byte)(unsafe.Pointer(&val32)), &bufLen)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return int(val32)
|
||||
}
|
||||
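The Windows variant above simplifies the kernel string with the regexp `^([\d\.]+?\.)(\d+) Build (\d+)$`, collapsing `RELEASE.BUILD Build BUILD` to `RELEASE.BUILD` when the two build numbers agree. A standalone illustration of that step; the sample input string is illustrative, not taken from the commit:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A typical Windows kernel string of the form "RELEASE.BUILD Build BUILD".
	osKernel := "10.0.19042 Build 19042"
	re := regexp.MustCompile(`^([\d\.]+?\.)(\d+) Build (\d+)$`)
	if match := re.FindStringSubmatch(osKernel); len(match) == 4 && match[2] == match[3] {
		// Drop the redundant " Build NNNNN" suffix.
		osKernel = match[1] + match[2]
	}
	fmt.Println(osKernel) // 10.0.19042
}
```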
@@ -5,7 +5,10 @@ import (
	"strings"
)

// Tags contains slice of build tags
// Tags contains slice of build tags.
// The `cmount` tag is added by cmd/cmount/mount.go only if build is static.
// The `noselfupdate` tag is added by cmd/selfupdate/noselfupdate.go
// Other tags including `cgo` are detected in this package.
var Tags []string

// GetLinkingAndTags tells how the rclone executable was linked
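The deleted cmount file earlier in this diff shows the registration pattern the new comment refers to: a file that is only compiled when its build tag is set appends that tag in init(). A rough sketch of the same shape for the noselfupdate case; package placement, import path and wording are illustrative, not a quote of the rclone source:

```go
// +build noselfupdate

package selfupdate // per the comment above, this registration lives in cmd/selfupdate

import "github.com/rclone/rclone/lib/buildinfo" // assumed import path

func init() {
	// Record that self-update support was compiled out of this binary,
	// mirroring how the deleted file registered the "cmount" tag.
	buildinfo.Tags = append(buildinfo.Tags, "noselfupdate")
}
```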
lib/cache/cache.go (vendored, 31 changes)
@@ -28,6 +28,30 @@ func New() *Cache {
	}
}

// SetExpireDuration sets the interval at which things expire
//
// If it is less than or equal to 0 then things are never cached
func (c *Cache) SetExpireDuration(d time.Duration) *Cache {
	c.expireDuration = d
	return c
}

// returns true if we aren't to cache anything
func (c *Cache) noCache() bool {
	return c.expireDuration <= 0
}

// SetExpireInterval sets the interval at which the cache expiry runs
//
// Set to 0 or a -ve number to disable
func (c *Cache) SetExpireInterval(d time.Duration) *Cache {
	if d <= 0 {
		d = 100 * 365 * 24 * time.Hour
	}
	c.expireInterval = d
	return c
}

// cacheEntry is stored in the cache
type cacheEntry struct {
	value interface{} // cached item
@@ -69,7 +93,9 @@ func (c *Cache) Get(key string, create CreateFunc) (value interface{}, err error
			err: err,
		}
		c.mu.Lock()
		c.cache[key] = entry
		if !c.noCache() {
			c.cache[key] = entry
		}
	}
	defer c.mu.Unlock()
	c.used(entry)
@@ -100,6 +126,9 @@ func (c *Cache) Unpin(key string) {
func (c *Cache) Put(key string, value interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.noCache() {
		return
	}
	entry := &cacheEntry{
		value: value,
		key: key,
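A short sketch of how a caller might use the new knob to turn caching off entirely. The import path is assumed from the lib/cache location; only functions visible in this diff (New, Put, SetExpireDuration, SetExpireInterval) are used:

```go
package main

import (
	"time"

	"github.com/rclone/rclone/lib/cache" // assumed import path for lib/cache
)

func main() {
	c := cache.New()

	// Default behaviour: entries are stored and removed later by the expiry pass.
	c.Put("remote:", "expensive-to-build value")

	// New in this change: a non-positive expire duration disables caching
	// entirely, so Put stores nothing and Get always calls the create function.
	c.SetExpireDuration(0)
	c.Put("other:", "never stored")

	// SetExpireInterval still controls how often the expiry pass runs.
	c.SetExpireInterval(5 * time.Minute)
}
```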
lib/cache/cache_test.go (vendored, 27 changes)
@@ -100,7 +100,7 @@ func TestPut(t *testing.T) {
func TestCacheExpire(t *testing.T) {
	c, create := setup(t)

	c.expireInterval = time.Millisecond
	c.SetExpireInterval(time.Millisecond)
	assert.Equal(t, false, c.expireRunning)

	_, err := c.Get("/", create)
@@ -127,6 +127,31 @@ func TestCacheExpire(t *testing.T) {
	c.mu.Unlock()
}

func TestCacheNoExpire(t *testing.T) {
	c, create := setup(t)

	assert.False(t, c.noCache())

	c.SetExpireDuration(0)
	assert.Equal(t, false, c.expireRunning)

	assert.True(t, c.noCache())

	f, err := c.Get("/", create)
	require.NoError(t, err)
	require.NotNil(t, f)

	c.mu.Lock()
	assert.Equal(t, 0, len(c.cache))
	c.mu.Unlock()

	c.Put("/alien", "slime")

	c.mu.Lock()
	assert.Equal(t, 0, len(c.cache))
	c.mu.Unlock()
}

func TestCachePin(t *testing.T) {
	c, create := setup(t)

@@ -447,19 +447,46 @@ Execute the following on the machine with the web browser (same rclone
version recommended):

`)
	if changed {
		fmt.Printf("\trclone authorize %q -- %q %q\n", id, oauthConfig.ClientID, oauthConfig.ClientSecret)
	// Find the configuration
	ri, err := fs.Find(id)
	if err != nil {
		return errors.Wrap(err, "oauthutil authorize")
	}
	// Find the overridden options
	inM := ri.Options.NonDefault(m)
	delete(inM, config.ConfigToken) // delete token as we are refreshing it
	for k, v := range inM {
		fs.Debugf(nil, "sending %s = %q", k, v)
	}
	// Encode them into a string
	mCopyString, err := inM.Encode()
	if err != nil {
		return errors.Wrap(err, "oauthutil authorize encode")
	}
	// Write what the user has to do
	if len(mCopyString) > 0 {
		fmt.Printf("\trclone authorize %q %q\n", id, mCopyString)
	} else {
		fmt.Printf("\trclone authorize %q\n", id)
	}
	fmt.Println("\nThen paste the result below:")
	code := config.ReadNonEmptyLine("result> ")
	token := &oauth2.Token{}
	err := json.Unmarshal([]byte(code), token)
	if err != nil {
		return err
	// Read the updates to the config
	var outM configmap.Simple
	for {
		outM = configmap.Simple{}
		code := config.ReadNonEmptyLine("result> ")
		err = outM.Decode(code)
		if err == nil {
			break
		}
		fmt.Printf("Couldn't decode response - try again (make sure you are using a matching version of rclone on both sides: %v\n", err)
	}
	return PutToken(name, m, token, true)
	// Save the config updates
	for k, v := range outM {
		m.Set(k, v)
		fs.Debugf(nil, "received %s = %q", k, v)
	}
	return nil
}
}

@@ -526,14 +553,6 @@ version recommended):
		return errors.Wrap(err, "failed to get token")
	}

	// Print code if we are doing a manual auth
	if authorizeOnly {
		result, err := json.Marshal(token)
		if err != nil {
			return errors.Wrap(err, "failed to marshal token")
		}
		fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste\n", result)
	}
	return PutToken(name, m, token, true)
}

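The reworked flow above hinges on shipping the overridden options as one encoded string: the config side prints `rclone authorize "remote" "<encoded options>"`, and the authorize side decodes that blob into a configmap.Simple before replying with another encoded blob. A rough sketch of that round trip; the import path is assumed, the option names are illustrative, and Simple is assumed to behave as a plain string map, as the range loops in the change suggest:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/configmap" // assumed import path
)

func main() {
	// Config side: collect the non-default options and encode them into the
	// single string pasted into `rclone authorize`.
	inM := configmap.Simple{}
	inM["client_id"] = "xxx"     // illustrative option name
	inM["client_secret"] = "yyy" // illustrative option name
	encoded, err := inM.Encode()
	if err != nil {
		panic(err)
	}
	fmt.Printf("rclone authorize %q %q\n", "drive", encoded)

	// Authorize side: decode the blob back into a map and read the options.
	outM := configmap.Simple{}
	if err := outM.Decode(encoded); err != nil {
		panic(err)
	}
	for k, v := range outM {
		fmt.Printf("received %s = %q\n", k, v)
	}
}
```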
vfs/dir.go (17 changes)
@@ -843,6 +843,23 @@ func (d *Dir) Open(flags int) (fd Handle, err error) {
// Create makes a new file node
func (d *Dir) Create(name string, flags int) (*File, error) {
	// fs.Debugf(path, "Dir.Create")
	// Return existing node if one exists
	node, err := d.stat(name)
	switch err {
	case ENOENT:
		// not found, carry on
	case nil:
		// found so check what it is
		if node.IsFile() {
			return node.(*File), err
		}
		return nil, EEXIST // EISDIR would be better but we don't have that
	default:
		// a different error - report
		fs.Errorf(d, "Dir.Create stat failed: %v", err)
		return nil, err
	}
	// node doesn't exist so create it
	if d.vfs.Opt.ReadOnly {
		return nil, EROFS
	}

@@ -375,6 +375,13 @@ func TestDirCreate(t *testing.T) {
	require.NoError(t, err)
	assert.Equal(t, int64(5), file2.Size())

	// Try creating the file again - make sure we get the same file node
	file3, err := dir.Create("potato", os.O_RDWR|os.O_CREATE)
	require.NoError(t, err)
	assert.Equal(t, int64(5), file3.Size())
	assert.Equal(t, fmt.Sprintf("%p", file), fmt.Sprintf("%p", file3), "didn't return same node")

	// Test read only fs creating new
	vfs.Opt.ReadOnly = true
	_, err = dir.Create("sausage", os.O_WRONLY|os.O_CREATE)
	assert.Equal(t, EROFS, err)
