Mirror of https://github.com/rclone/rclone.git (synced 2026-01-30 00:03:34 +00:00)

Compare commits: v1.45...experiment (64 commits)
a5863650af
2d2533a08a
733b072d4f
2d01a65e36
b8280521a5
60e6af2605
9d16822c63
38a0946071
95e52e1ac3
51ab1c940a
6f30427357
3220acc729
3c97933416
039e2a9649
1c01d0b84a
39eac7a765
082a7065b1
f7b08a6982
37e32d8c80
f2a1b991de
4128e696d6
7e7f3de355
1f6a1cd26d
2cfe2354df
13387c0838
5babf2dc5c
9012d7c6c1
df1faa9a8f
3de7ad5223
9cb3a68c38
c1dd76788d
5ee1816a71
63b51c6742
e7684b7ed5
dda23baf42
8575abf599
feea0532cd
d3e8ae1820
91a9a959a2
04eae51d11
8fb707e16d
4138d5aa75
fc654a4cec
26b5f55cba
3f572e6bf2
941ad6bc62
5d1d93e163
35fba5bfdd
887834da91
107293c80e
e3c4ebd59a
d99ffde7c0
198c34ce21
0eba88bbfe
aeea4430d5
4b15c4215c
50452207d9
01fcad9b9c
eb41253764
89625e54cf
58f7141c96
e56c6402a7
d0eb8ddc30
a6c28a5faa
@@ -1,14 +1,17 @@
 # Maintainers guide for rclone #
 
-Current active maintainers of rclone are
+Current active maintainers of rclone are:
 
-* Nick Craig-Wood @ncw
-* Stefan Breunig @breunigs
-* Ishuah Kariuki @ishuah
-* Remus Bunduc @remusb - cache subsystem maintainer
-* Fabian Möller @B4dM4n
-* Alex Chen @Cnly
-* Sandeep Ummadi @sandeepkru
+| Name | GitHub ID | Specific Responsibilities |
+| :--------------- | :---------- | :-------------------------- |
+| Nick Craig-Wood | @ncw | overall project health |
+| Stefan Breunig | @breunigs | |
+| Ishuah Kariuki | @ishuah | |
+| Remus Bunduc | @remusb | cache backend |
+| Fabian Möller | @B4dM4n | |
+| Alex Chen | @Cnly | onedrive backend |
+| Sandeep Ummadi | @sandeepkru | azureblob backend |
+| Sebastian Bünger | @buengese | jottacloud & yandex backends |
 
 **This is a work in progress Draft**
@@ -91,4 +91,4 @@ License
 -------
 
 This is free software under the terms of MIT the license (check the
-[COPYING file](/rclone/COPYING) included in this package).
+[COPYING file](/COPYING) included in this package).
@@ -21,7 +21,7 @@ import (
     "strings"
     "time"
 
-    "github.com/ncw/go-acd"
+    acd "github.com/ncw/go-acd"
     "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fs/config"
     "github.com/ncw/rclone/fs/config/configmap"
@@ -1,6 +1,6 @@
 // Package azureblob provides an interface to the Microsoft Azure blob object storage system
 
-// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
+// +build !plan9,!solaris,go1.8
 
 package azureblob
@@ -22,12 +22,14 @@ import (
     "sync"
     "time"
 
+    "github.com/Azure/azure-pipeline-go/pipeline"
     "github.com/Azure/azure-storage-blob-go/azblob"
     "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fs/accounting"
     "github.com/ncw/rclone/fs/config/configmap"
     "github.com/ncw/rclone/fs/config/configstruct"
     "github.com/ncw/rclone/fs/fserrors"
+    "github.com/ncw/rclone/fs/fshttp"
     "github.com/ncw/rclone/fs/hash"
     "github.com/ncw/rclone/fs/walk"
     "github.com/ncw/rclone/lib/pacer"
@@ -135,6 +137,7 @@ type Fs struct {
     root        string               // the path we are working on if any
     opt         Options              // parsed config options
     features    *fs.Features         // optional features
+    client      *http.Client         // http client we are using
     svcURL      *azblob.ServiceURL   // reference to serviceURL
     cntURL      *azblob.ContainerURL // reference to containerURL
     container   string               // the container we are working on
@@ -272,6 +275,38 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
     return
 }
 
+// httpClientFactory creates a Factory object that sends HTTP requests
+// to a rclone's http.Client.
+//
+// copied from azblob.newDefaultHTTPClientFactory
+func httpClientFactory(client *http.Client) pipeline.Factory {
+    return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+        return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+            r, err := client.Do(request.WithContext(ctx))
+            if err != nil {
+                err = pipeline.NewError(err, "HTTP request failed")
+            }
+            return pipeline.NewHTTPResponse(r), err
+        }
+    })
+}
+
+// newPipeline creates a Pipeline using the specified credentials and options.
+//
+// this code was copied from azblob.NewPipeline
+func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
+    // Closest to API goes first; closest to the wire goes last
+    factories := []pipeline.Factory{
+        azblob.NewTelemetryPolicyFactory(o.Telemetry),
+        azblob.NewUniqueRequestIDPolicyFactory(),
+        azblob.NewRetryPolicyFactory(o.Retry),
+        c,
+        pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
+        azblob.NewRequestLogPolicyFactory(o.RequestLog),
+    }
+    return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
+}
+
 // NewFs contstructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
@@ -307,6 +342,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
             string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
     }
 
+    f := &Fs{
+        name:        name,
+        opt:         *opt,
+        container:   container,
+        root:        directory,
+        pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
+        uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
+        client:      fshttp.NewClient(fs.Config),
+    }
+    f.features = (&fs.Features{
+        ReadMimeType:  true,
+        WriteMimeType: true,
+        BucketBased:   true,
+        SetTier:       true,
+        GetTier:       true,
+    }).Fill(f)
+
     var (
         u          *url.URL
         serviceURL azblob.ServiceURL
@@ -323,7 +375,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
         if err != nil {
             return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
         }
-        pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
+        pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
         serviceURL = azblob.NewServiceURL(*u, pipeline)
         containerURL = serviceURL.NewContainerURL(container)
     case opt.SASURL != "":
@@ -332,7 +384,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
             return nil, errors.Wrapf(err, "failed to parse SAS URL")
         }
         // use anonymous credentials in case of sas url
-        pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
+        pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
         // Check if we have container level SAS or account level sas
         parts := azblob.NewBlobURLParts(*u)
         if parts.ContainerName != "" {
@@ -349,24 +401,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     default:
         return nil, errors.New("Need account+key or connectionString or sasURL")
     }
+    f.svcURL = &serviceURL
+    f.cntURL = &containerURL
 
-    f := &Fs{
-        name:        name,
-        opt:         *opt,
-        container:   container,
-        root:        directory,
-        svcURL:      &serviceURL,
-        cntURL:      &containerURL,
-        pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
-        uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
-    }
-    f.features = (&fs.Features{
-        ReadMimeType:  true,
-        WriteMimeType: true,
-        BucketBased:   true,
-        SetTier:       true,
-        GetTier:       true,
-    }).Fill(f)
     if f.root != "" {
         f.root += "/"
         // Check to see if the (container,directory) is actually an existing file
@@ -380,8 +417,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
         }
         _, err := f.NewObject(remote)
         if err != nil {
-            if err == fs.ErrorObjectNotFound {
-                // File doesn't exist so return old f
+            if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
+                // File doesn't exist or is a directory so return old f
                 f.root = oldRoot
                 return f, nil
             }
@@ -437,6 +474,21 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
     o.meta[modTimeKey] = modTime.Format(timeFormatOut)
 }
 
+// Returns whether file is a directory marker or not
+func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool {
+    // Directory markers are 0 length
+    if size == 0 {
+        // Note that metadata with hdi_isfolder = true seems to be a
+        // defacto standard for marking blobs as directories.
+        endsWithSlash := strings.HasSuffix(remote, "/")
+        if endsWithSlash || remote == "" || metadata["hdi_isfolder"] == "true" {
+            return true
+        }
+
+    }
+    return false
+}
+
 // listFn is called from list to handle an object
 type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
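To make the rules concrete, here is a sketch of how the helper above classifies a few hypothetical blobs (azblob.Metadata is treated as a plain map[string]string here):

    isDirectoryMarker(0, azblob.Metadata{}, "dir/")                      // true - trailing slash
    isDirectoryMarker(0, azblob.Metadata{"hdi_isfolder": "true"}, "dir") // true - hdi_isfolder metadata
    isDirectoryMarker(0, azblob.Metadata{}, "")                          // true - the container root
    isDirectoryMarker(7, azblob.Metadata{"hdi_isfolder": "true"}, "dir") // false - markers must be 0 length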
@@ -472,6 +524,7 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
         MaxResults: int32(maxResults),
     }
     ctx := context.Background()
+    directoryMarkers := map[string]struct{}{}
     for marker := (azblob.Marker{}); marker.NotDone(); {
         var response *azblob.ListBlobsHierarchySegmentResponse
         err := f.pacer.Call(func() (bool, error) {
@@ -501,13 +554,23 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
             continue
         }
         remote := file.Name[len(f.root):]
-        // Check for directory
-        isDirectory := strings.HasSuffix(remote, "/")
-        if isDirectory {
-            remote = remote[:len(remote)-1]
+        if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
+            if strings.HasSuffix(remote, "/") {
+                remote = remote[:len(remote)-1]
+            }
+            err = fn(remote, file, true)
+            if err != nil {
+                return err
+            }
+            // Keep track of directory markers. If recursing then
+            // there will be no Prefixes so no need to keep track
+            if !recurse {
+                directoryMarkers[remote] = struct{}{}
+            }
+            continue // skip directory marker
         }
         // Send object
-        err = fn(remote, file, isDirectory)
+        err = fn(remote, file, false)
         if err != nil {
             return err
         }
@@ -520,6 +583,10 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
             continue
         }
         remote = remote[len(f.root):]
+        // Don't send if already sent as a directory marker
+        if _, found := directoryMarkers[remote]; found {
+            continue
+        }
         // Send object
         err = fn(remote, nil, true)
         if err != nil {
@@ -923,27 +990,37 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
|
||||
// o.md5
|
||||
// o.meta
|
||||
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
|
||||
metadata := info.NewMetadata()
|
||||
size := info.ContentLength()
|
||||
if isDirectoryMarker(size, metadata, o.remote) {
|
||||
return fs.ErrorNotAFile
|
||||
}
|
||||
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
|
||||
// this as base64 encoded string.
|
||||
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
|
||||
o.mimeType = info.ContentType()
|
||||
o.size = info.ContentLength()
|
||||
o.size = size
|
||||
o.modTime = time.Time(info.LastModified())
|
||||
o.accessTier = azblob.AccessTierType(info.AccessTier())
|
||||
o.setMetadata(info.NewMetadata())
|
||||
o.setMetadata(metadata)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
|
||||
metadata := info.Metadata
|
||||
size := *info.Properties.ContentLength
|
||||
if isDirectoryMarker(size, metadata, o.remote) {
|
||||
return fs.ErrorNotAFile
|
||||
}
|
||||
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
|
||||
// this as base64 encoded string.
|
||||
o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5)
|
||||
o.mimeType = *info.Properties.ContentType
|
||||
o.size = *info.Properties.ContentLength
|
||||
o.size = size
|
||||
o.modTime = info.Properties.LastModified
|
||||
o.accessTier = info.Properties.AccessTier
|
||||
o.setMetadata(info.Metadata)
|
||||
o.setMetadata(metadata)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
-// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
+// +build !plan9,!solaris,go1.8
 
 package azureblob
@@ -1,6 +1,6 @@
 // Test AzureBlob filesystem interface
 
-// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
+// +build !plan9,!solaris,go1.8
 
 package azureblob
@@ -1,6 +1,6 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-// +build freebsd netbsd openbsd plan9 solaris !go1.8
+// +build plan9 solaris !go1.8
 
 package azureblob
@@ -136,6 +136,7 @@ type AuthorizeAccountResponse struct {
     AccountID string `json:"accountId"` // The identifier for the account.
     Allowed   struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
         BucketID     string      `json:"bucketId"`     // When present, access is restricted to one bucket.
+        BucketName   string      `json:"bucketName"`   // When present, name of bucket - may be empty
         Capabilities []string    `json:"capabilities"` // A list of strings, each one naming a capability the key has.
         NamePrefix   interface{} `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
     } `json:"allowed"`
@@ -368,6 +368,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     }
+    // If this is a key limited to a single bucket, it must exist already
+    if f.bucket != "" && f.info.Allowed.BucketID != "" {
+        allowedBucket := f.info.Allowed.BucketName
+        if allowedBucket == "" {
+            return nil, errors.New("bucket that application key is restricted to no longer exists")
+        }
+        if allowedBucket != f.bucket {
+            return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
+        }
+        f.markBucketOK()
+        f.setBucketID(f.info.Allowed.BucketID)
+    }
@@ -980,6 +987,12 @@ func (f *Fs) purge(oldOnly bool) error {
             errReturn = err
         }
     }
+    var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
+        if time.Since(time.Time(timestamp)).Hours() > 24 {
+            return true
+        }
+        return false
+    }
 
     // Delete Config.Transfers in parallel
     toBeDeleted := make(chan *api.File, fs.Config.Transfers)
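The helper added above is just a 24-hour age test; an equivalent, more direct form (a sketch, not part of the change) would be:

    func isUnfinishedUploadStale(timestamp api.Timestamp) bool {
        return time.Since(time.Time(timestamp)) > 24*time.Hour
    }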
@@ -1003,6 +1016,9 @@ func (f *Fs) purge(oldOnly bool) error {
                 if object.Action == "hide" {
                     fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
                     toBeDeleted <- object
+                } else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
+                    fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
+                    toBeDeleted <- object
                 } else {
                     fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
                 }
backend/cache/plex.go (vendored)
@@ -15,7 +15,7 @@ import (
     "time"
 
     "github.com/ncw/rclone/fs"
-    "github.com/patrickmn/go-cache"
+    cache "github.com/patrickmn/go-cache"
     "golang.org/x/net/websocket"
 )
backend/cache/storage_memory.go (vendored)
@@ -8,7 +8,7 @@ import (
    "time"
 
     "github.com/ncw/rclone/fs"
-    "github.com/patrickmn/go-cache"
+    cache "github.com/patrickmn/go-cache"
     "github.com/pkg/errors"
 )
@@ -41,6 +41,7 @@ var (
     ErrorBadDecryptControlChar   = errors.New("bad decryption - contains control chars")
     ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
     ErrorTooShortAfterDecode     = errors.New("too short after base32 decode")
+    ErrorTooLongAfterDecode      = errors.New("too long after base32 decode")
     ErrorEncryptedFileTooShort   = errors.New("file is too short to be encrypted")
     ErrorEncryptedFileBadHeader  = errors.New("file has truncated block header")
     ErrorEncryptedBadMagic       = errors.New("not an encrypted file - bad magic string")
@@ -284,6 +285,9 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
         // not possible if decodeFilename() working correctly
         return "", ErrorTooShortAfterDecode
     }
+    if len(rawCiphertext) > 2048 {
+        return "", ErrorTooLongAfterDecode
+    }
     paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
     plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
     if err != nil {
@@ -194,6 +194,10 @@ func TestEncryptSegment(t *testing.T) {
 
 func TestDecryptSegment(t *testing.T) {
     // We've tested the forwards above, now concentrate on the errors
+    longName := make([]byte, 3328)
+    for i := range longName {
+        longName[i] = 'a'
+    }
     c, _ := newCipher(NameEncryptionStandard, "", "", true)
     for _, test := range []struct {
         in  string
@@ -201,6 +205,7 @@ func TestDecryptSegment(t *testing.T) {
     }{
         {"64=", ErrorBadBase32Encoding},
         {"!", base32.CorruptInputError(0)},
+        {string(longName), ErrorTooLongAfterDecode},
         {encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
         {encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
         {encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
@@ -1,4 +1,7 @@
 // Package drive interfaces with the Google Drive object storage system
 
+// +build go1.9
+
 package drive
 
 // FIXME need to deal with some corner cases
@@ -122,6 +125,29 @@ var (
     _linkTemplates map[string]*template.Template // available link types
 )
 
+// Parse the scopes option returning a slice of scopes
+func driveScopes(scopesString string) (scopes []string) {
+    if scopesString == "" {
+        scopesString = defaultScope
+    }
+    for _, scope := range strings.Split(scopesString, ",") {
+        scope = strings.TrimSpace(scope)
+        scopes = append(scopes, scopePrefix+scope)
+    }
+    return scopes
+}
+
+// Returns true if one of the scopes was "drive.appfolder"
+func driveScopesContainsAppFolder(scopes []string) bool {
+    for _, scope := range scopes {
+        if scope == scopePrefix+"drive.appfolder" {
+            return true
+        }
+
+    }
+    return false
+}
+
 // Register with Fs
 func init() {
     fs.Register(&fs.RegInfo{
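Concretely, the new helpers behave like this (these input/output pairs follow TestDriveScopes further down):

    driveScopes("")           // ["https://www.googleapis.com/auth/drive"]
    driveScopes("drive.file") // ["https://www.googleapis.com/auth/drive.file"]
    driveScopesContainsAppFolder(driveScopes("drive.appfolder")) // true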
@@ -136,18 +162,14 @@ func init() {
             fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
             return
         }
 
-        // Fill in the scopes
-        if opt.Scope == "" {
-            opt.Scope = defaultScope
-        }
-        driveConfig.Scopes = nil
-        for _, scope := range strings.Split(opt.Scope, ",") {
-            driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope))
-            // Set the root_folder_id if using drive.appfolder
-            if scope == "drive.appfolder" {
-                m.Set("root_folder_id", "appDataFolder")
-            }
-        }
+        driveConfig.Scopes = driveScopes(opt.Scope)
+        // Set the root_folder_id if using drive.appfolder
+        if driveScopesContainsAppFolder(driveConfig.Scopes) {
+            m.Set("root_folder_id", "appDataFolder")
+        }
 
         if opt.ServiceAccountFile == "" {
             err = oauthutil.Config("drive", name, m, driveConfig)
             if err != nil {
@@ -753,7 +775,8 @@ func newPacer() *pacer.Pacer {
 }
 
 func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
-    conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...)
+    scopes := driveScopes(opt.Scope)
+    conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
     if err != nil {
         return nil, errors.Wrap(err, "error processing credentials")
     }
@@ -1,3 +1,5 @@
+// +build go1.9
+
 package drive
 
 import (
@@ -20,6 +22,31 @@ import (
     "google.golang.org/api/drive/v3"
 )
 
+func TestDriveScopes(t *testing.T) {
+    for _, test := range []struct {
+        in       string
+        want     []string
+        wantFlag bool
+    }{
+        {"", []string{
+            "https://www.googleapis.com/auth/drive",
+        }, false},
+        {" drive.file , drive.readonly", []string{
+            "https://www.googleapis.com/auth/drive.file",
+            "https://www.googleapis.com/auth/drive.readonly",
+        }, false},
+        {" drive.file , drive.appfolder", []string{
+            "https://www.googleapis.com/auth/drive.file",
+            "https://www.googleapis.com/auth/drive.appfolder",
+        }, true},
+    } {
+        got := driveScopes(test.in)
+        assert.Equal(t, test.want, got, test.in)
+        gotFlag := driveScopesContainsAppFolder(got)
+        assert.Equal(t, test.wantFlag, gotFlag, test.in)
+    }
+}
+
 /*
 var additionalMimeTypes = map[string]string{
     "application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
@@ -1,4 +1,7 @@
 // Test Drive filesystem interface
 
+// +build go1.9
+
 package drive
 
 import (
backend/drive/drive_unsupported.go (new file)
@@ -0,0 +1,6 @@
+// Build for unsupported platforms to stop go complaining
+// about "no buildable Go source files "
+
+// +build !go1.9
+
+package drive
@@ -8,6 +8,8 @@
 //
 // This contains code adapted from google.golang.org/api (C) the GO AUTHORS
 
+// +build go1.9
+
 package drive
 
 import (
@@ -31,6 +31,7 @@ import (
     "time"
 
     "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
+    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
     "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
     "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
     "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
@@ -203,7 +204,16 @@ func shouldRetry(err error) (bool, error) {
         return false, err
     }
     baseErrString := errors.Cause(err).Error()
-    // FIXME there is probably a better way of doing this!
+    // handle any official Retry-After header from Dropbox's SDK first
+    switch e := err.(type) {
+    case auth.RateLimitAPIError:
+        if e.RateLimitError.RetryAfter > 0 {
+            fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
+            time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
+        }
+        return true, err
+    }
+    // Keep old behaviour for backward compatibility
     if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
         return true, err
     }
@@ -1,4 +1,7 @@
 // Package googlecloudstorage provides an interface to Google Cloud Storage
 
+// +build go1.9
+
 package googlecloudstorage
 
 /*
@@ -1,4 +1,7 @@
 // Test GoogleCloudStorage filesystem interface
 
+// +build go1.9
+
 package googlecloudstorage_test
 
 import (
@@ -0,0 +1,6 @@
+// Build for unsupported platforms to stop go complaining
+// about "no buildable Go source files "
+
+// +build !go1.9
+
+package googlecloudstorage
@@ -193,7 +193,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
     }
     err := o.stat()
     if err != nil {
-        return nil, errors.Wrap(err, "Stat failed")
+        return nil, err
     }
     return o, nil
 }
@@ -416,6 +416,9 @@ func (o *Object) url() string {
 func (o *Object) stat() error {
     url := o.url()
     res, err := o.fs.httpClient.Head(url)
+    if err == nil && res.StatusCode == http.StatusNotFound {
+        return fs.ErrorObjectNotFound
+    }
     err = statusError(res, err)
     if err != nil {
         return errors.Wrap(err, "failed to stat")
@@ -144,6 +144,11 @@ func TestNewObject(t *testing.T) {
 
     dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
     assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
+
+    // check object not found
+    o, err = f.NewObject("not found.txt")
+    assert.Nil(t, o)
+    assert.Equal(t, fs.ErrorObjectNotFound, err)
 }
 
 func TestOpen(t *testing.T) {
@@ -179,13 +179,13 @@ func (u *uploader) upload() error {
     // Do one read to determine if we have more than one part
     reader, _, err := u.nextReader()
     if err == io.EOF { // single part
-        fs.Debugf(u, "Tried to upload a singile object to QingStor")
+        fs.Debugf(u, "Uploading as single part object to QingStor")
         return u.singlePartUpload(reader)
     } else if err != nil {
         return errors.Errorf("read upload data failed: %s", err)
     }
 
-    fs.Debugf(u, "Treied to upload a multi-part object to QingStor")
+    fs.Debugf(u, "Uploading as multi-part object to QingStor")
     mu := multiUploader{uploader: u}
     return mu.multiPartUpload(reader)
 }
@@ -261,7 +261,7 @@ func (mu *multiUploader) initiate() error {
     req := qs.InitiateMultipartUploadInput{
         ContentType: &mu.cfg.mimeType,
     }
-    fs.Debugf(mu, "Tried to initiate a multi-part upload")
+    fs.Debugf(mu, "Initiating a multi-part upload")
     rsp, err := bucketInit.InitiateMultipartUpload(mu.cfg.key, &req)
     if err == nil {
         mu.uploadID = rsp.UploadID
@@ -279,12 +279,12 @@ func (mu *multiUploader) send(c chunk) error {
         ContentLength: &c.size,
         Body:          c.buffer,
     }
-    fs.Debugf(mu, "Tried to upload a part to QingStor that partNumber %d and partSize %d", c.partNumber, c.size)
+    fs.Debugf(mu, "Uploading a part to QingStor with partNumber %d and partSize %d", c.partNumber, c.size)
     _, err := bucketInit.UploadMultipart(mu.cfg.key, &req)
     if err != nil {
         return err
     }
-    fs.Debugf(mu, "Upload part finished that partNumber %d and partSize %d", c.partNumber, c.size)
+    fs.Debugf(mu, "Done uploading part partNumber %d and partSize %d", c.partNumber, c.size)
 
     mu.mtx.Lock()
     defer mu.mtx.Unlock()
@@ -304,7 +304,7 @@ func (mu *multiUploader) list() error {
     req := qs.ListMultipartInput{
         UploadID: mu.uploadID,
     }
-    fs.Debugf(mu, "Tried to list a multi-part")
+    fs.Debugf(mu, "Reading multi-part details")
     rsp, err := bucketInit.ListMultipart(mu.cfg.key, &req)
     if err == nil {
         mu.objectParts = rsp.ObjectParts
@@ -331,7 +331,7 @@ func (mu *multiUploader) complete() error {
     req := qs.CompleteMultipartUploadInput{
         ObjectParts: mu.objectParts,
         ETag:        &md5String,
     }
-    fs.Debugf(mu, "Tried to complete a multi-part")
+    fs.Debugf(mu, "Completing multi-part object")
     _, err = bucketInit.CompleteMultipartUpload(mu.cfg.key, &req)
     if err == nil {
         fs.Debugf(mu, "Complete multi-part finished")
@@ -348,7 +348,7 @@ func (mu *multiUploader) abort() error {
     req := qs.AbortMultipartUploadInput{
         UploadID: uploadID,
     }
-    fs.Debugf(mu, "Tried to abort a multi-part")
+    fs.Debugf(mu, "Aborting multi-part object %q", *uploadID)
     _, err = bucketInit.AbortMultipartUpload(mu.cfg.key, &req)
 }
backend/s3/s3.go
@@ -543,13 +543,24 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
         }, {
             Value: "ONEZONE_IA",
             Help:  "One Zone Infrequent Access storage class",
+        }, {
+            Value: "GLACIER",
+            Help:  "Glacier storage class",
         }},
     }, {
+        Name: "upload_cutoff",
+        Help: `Cutoff for switching to chunked upload
+
+Any files larger than this will be uploaded in chunks of chunk_size.
+The minimum is 0 and the maximum is 5GB.`,
+        Default:  defaultUploadCutoff,
+        Advanced: true,
+    }, {
         Name: "chunk_size",
         Help: `Chunk size to use for uploading.
 
-Any files larger than this will be uploaded in chunks of this
-size. The default is 5MB. The minimum is 5MB.
+When uploading files larger than upload_cutoff they will be uploaded
+as multipart uploads using this chunk size.
 
 Note that "--s3-upload-concurrency" chunks of this size are buffered
 in memory per transfer.
@@ -577,7 +588,7 @@ concurrently.
 If you are uploading small numbers of large file over high speed link
 and these uploads do not fully utilize your bandwidth, then increasing
 this may help to speed up the transfers.`,
-        Default:  2,
+        Default:  4,
         Advanced: true,
     }, {
         Name: "force_path_style",
@@ -607,14 +618,16 @@ Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
 
 // Constants
 const (
-    metaMtime      = "Mtime"                        // the meta key to store mtime in - eg X-Amz-Meta-Mtime
-    metaMD5Hash    = "Md5chksum"                    // the meta key to store md5hash in
-    listChunkSize  = 1000                           // number of items to read at once
-    maxRetries     = 10                             // number of retries to make of operations
-    maxSizeForCopy = 5 * 1024 * 1024 * 1024         // The maximum size of object we can COPY
-    maxFileSize    = 5 * 1024 * 1024 * 1024 * 1024  // largest possible upload file size
-    minChunkSize   = fs.SizeSuffix(s3manager.MinUploadPartSize)
-    minSleep       = 10 * time.Millisecond          // In case of error, start at 10ms sleep.
+    metaMtime           = "Mtime"                       // the meta key to store mtime in - eg X-Amz-Meta-Mtime
+    metaMD5Hash         = "Md5chksum"                   // the meta key to store md5hash in
+    listChunkSize       = 1000                          // number of items to read at once
+    maxRetries          = 10                            // number of retries to make of operations
+    maxSizeForCopy      = 5 * 1024 * 1024 * 1024        // The maximum size of object we can COPY
+    maxFileSize         = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
+    minChunkSize        = fs.SizeSuffix(s3manager.MinUploadPartSize)
+    defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
+    maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
+    minSleep            = 10 * time.Millisecond // In case of error, start at 10ms sleep.
 )
 
 // Options defines the configuration for this backend
@@ -630,6 +643,7 @@ type Options struct {
     ServerSideEncryption string        `config:"server_side_encryption"`
     SSEKMSKeyID          string        `config:"sse_kms_key_id"`
     StorageClass         string        `config:"storage_class"`
+    UploadCutoff         fs.SizeSuffix `config:"upload_cutoff"`
     ChunkSize            fs.SizeSuffix `config:"chunk_size"`
     DisableChecksum      bool          `config:"disable_checksum"`
     SessionToken         string        `config:"session_token"`
@@ -651,6 +665,7 @@ type Fs struct {
     bucketOK      bool         // true if we have created the bucket
     bucketDeleted bool         // true if we have deleted the bucket
     pacer         *pacer.Pacer // To pace the API calls
+    srv           *http.Client // a plain http client
 }
 
 // Object describes a s3 object
@@ -854,6 +869,21 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
     return
 }
 
+func checkUploadCutoff(cs fs.SizeSuffix) error {
+    if cs > maxUploadCutoff {
+        return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
+    }
+    return nil
+}
+
+func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+    err = checkUploadCutoff(cs)
+    if err == nil {
+        old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
+    }
+    return
+}
+
 // NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
@@ -866,6 +896,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     if err != nil {
         return nil, errors.Wrap(err, "s3: chunk size")
     }
+    err = checkUploadCutoff(opt.UploadCutoff)
+    if err != nil {
+        return nil, errors.Wrap(err, "s3: upload cutoff")
+    }
     bucket, directory, err := s3ParsePath(root)
     if err != nil {
         return nil, err
@@ -882,6 +916,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
         bucket: bucket,
         ses:    ses,
         pacer:  pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
+        srv:    fshttp.NewClient(fs.Config),
     }
     f.features = (&fs.Features{
         ReadMimeType: true,
@@ -1556,38 +1591,46 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
     modTime := src.ModTime()
     size := src.Size()
 
-    uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
-        u.Concurrency = o.fs.opt.UploadConcurrency
-        u.LeavePartsOnError = false
-        u.S3 = o.fs.c
-        u.PartSize = int64(o.fs.opt.ChunkSize)
+    multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
+    var uploader *s3manager.Uploader
+    if multipart {
+        uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
+            u.Concurrency = o.fs.opt.UploadConcurrency
+            u.LeavePartsOnError = false
+            u.S3 = o.fs.c
+            u.PartSize = int64(o.fs.opt.ChunkSize)
 
-        if size == -1 {
-            // Make parts as small as possible while still being able to upload to the
-            // S3 file size limit. Rounded up to nearest MB.
-            u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
-            return
-        }
-        // Adjust PartSize until the number of parts is small enough.
-        if size/u.PartSize >= s3manager.MaxUploadParts {
-            // Calculate partition size rounded up to the nearest MB
-            u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
-        }
-    })
+            if size == -1 {
+                // Make parts as small as possible while still being able to upload to the
+                // S3 file size limit. Rounded up to nearest MB.
+                u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
+                return
+            }
+            // Adjust PartSize until the number of parts is small enough.
+            if size/u.PartSize >= s3manager.MaxUploadParts {
+                // Calculate partition size rounded up to the nearest MB
+                u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
+            }
+        })
+    }
 
     // Set the mtime in the meta data
     metadata := map[string]*string{
         metaMtime: aws.String(swift.TimeToFloatString(modTime)),
     }
 
-    if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
+    // read the md5sum if available for non multpart and if
+    // disable checksum isn't present.
+    var md5sum string
+    if !multipart || !o.fs.opt.DisableChecksum {
         hash, err := src.Hash(hash.MD5)
 
         if err == nil && matchMd5.MatchString(hash) {
             hashBytes, err := hex.DecodeString(hash)
 
             if err == nil {
-                metadata[metaMD5Hash] = aws.String(base64.StdEncoding.EncodeToString(hashBytes))
+                md5sum = base64.StdEncoding.EncodeToString(hashBytes)
+                if multipart {
+                    metadata[metaMD5Hash] = &md5sum
+                }
             }
         }
     }
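To make the bit-shift arithmetic in the part-size calculation above concrete: with maxFileSize at 5 TiB (as defined earlier in this file) and s3manager.MaxUploadParts at its SDK value of 10000, the unknown-size branch works out as this sketch shows:

    // 5 * 1024^4 / 10000 = 549755813 bytes per part (integer division)
    // 549755813 >> 20    = 524      (whole MiB, rounded down)
    // (524 + 1) << 20    = 550502400 bytes = 525 MiB per part
    // 10000 parts * 525 MiB >= 5 TiB, so a stream of unknown length
    // can still reach the S3 maximum object size.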
@@ -1596,30 +1639,98 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
     mimeType := fs.MimeType(src)
 
     key := o.fs.root + o.remote
-    req := s3manager.UploadInput{
-        Bucket:      &o.fs.bucket,
-        ACL:         &o.fs.opt.ACL,
-        Key:         &key,
-        Body:        in,
-        ContentType: &mimeType,
-        Metadata:    metadata,
-        //ContentLength: &size,
-    }
-    if o.fs.opt.ServerSideEncryption != "" {
-        req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
-    }
-    if o.fs.opt.SSEKMSKeyID != "" {
-        req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
-    }
-    if o.fs.opt.StorageClass != "" {
-        req.StorageClass = &o.fs.opt.StorageClass
-    }
-    err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-        _, err = uploader.Upload(&req)
-        return shouldRetry(err)
-    })
-    if err != nil {
-        return err
+    if multipart {
+        req := s3manager.UploadInput{
+            Bucket:      &o.fs.bucket,
+            ACL:         &o.fs.opt.ACL,
+            Key:         &key,
+            Body:        in,
+            ContentType: &mimeType,
+            Metadata:    metadata,
+            //ContentLength: &size,
+        }
+        if o.fs.opt.ServerSideEncryption != "" {
+            req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
+        }
+        if o.fs.opt.SSEKMSKeyID != "" {
+            req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
+        }
+        if o.fs.opt.StorageClass != "" {
+            req.StorageClass = &o.fs.opt.StorageClass
+        }
+        err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+            _, err = uploader.Upload(&req)
+            return shouldRetry(err)
+        })
+        if err != nil {
+            return err
+        }
+    } else {
+        req := s3.PutObjectInput{
+            Bucket:      &o.fs.bucket,
+            ACL:         &o.fs.opt.ACL,
+            Key:         &key,
+            ContentType: &mimeType,
+            Metadata:    metadata,
+        }
+        if md5sum != "" {
+            req.ContentMD5 = &md5sum
+        }
+        if o.fs.opt.ServerSideEncryption != "" {
+            req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
+        }
+        if o.fs.opt.SSEKMSKeyID != "" {
+            req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
+        }
+        if o.fs.opt.StorageClass != "" {
+            req.StorageClass = &o.fs.opt.StorageClass
+        }
+
+        // Create the request
+        putObj, _ := o.fs.c.PutObjectRequest(&req)
+
+        // Sign it so we can upload using a presigned request.
+        //
+        // Note the SDK doesn't currently support streaming to
+        // PutObject so we'll use this work-around.
+        url, headers, err := putObj.PresignRequest(15 * time.Minute)
+        if err != nil {
+            return errors.Wrap(err, "s3 upload: sign request")
+        }
+
+        // Set request to nil if empty so as not to make chunked encoding
+        if size == 0 {
+            in = nil
+        }
+
+        // create the vanilla http request
+        httpReq, err := http.NewRequest("PUT", url, in)
+        if err != nil {
+            return errors.Wrap(err, "s3 upload: new request")
+        }
+
+        // set the headers we signed and the length
+        httpReq.Header = headers
+        httpReq.ContentLength = size
+
+        err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+            resp, err := o.fs.srv.Do(httpReq)
+            if err != nil {
+                return shouldRetry(err)
+            }
+            body, err := rest.ReadBody(resp)
+            if err != nil {
+                return shouldRetry(err)
+            }
+            if resp.StatusCode >= 200 && resp.StatusCode < 299 {
+                return false, nil
+            }
+            err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
+            return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
+        })
+        if err != nil {
+            return err
+        }
     }
 
     // Read the metadata from the newly created object
@@ -23,4 +23,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
     return f.setUploadChunkSize(cs)
 }
 
+func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+    return f.setUploadCutoff(cs)
+}
+
 var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
@@ -28,7 +28,7 @@ import (
     "github.com/ncw/rclone/lib/readers"
     "github.com/pkg/errors"
     "github.com/pkg/sftp"
-    "github.com/xanzy/ssh-agent"
+    sshagent "github.com/xanzy/ssh-agent"
     "golang.org/x/crypto/ssh"
     "golang.org/x/time/rate"
 )
@@ -594,12 +594,22 @@ func (f *Fs) Mkdir(dir string) error {
 
 // Rmdir removes the root directory of the Fs object
 func (f *Fs) Rmdir(dir string) error {
+    // Check to see if directory is empty as some servers will
+    // delete recursively with RemoveDirectory
+    entries, err := f.List(dir)
+    if err != nil {
+        return errors.Wrap(err, "Rmdir")
+    }
+    if len(entries) != 0 {
+        return fs.ErrorDirectoryNotEmpty
+    }
+    // Remove the directory
     root := path.Join(f.root, dir)
     c, err := f.getSftpConnection()
     if err != nil {
         return errors.Wrap(err, "Rmdir")
     }
-    err = c.sftpClient.Remove(root)
+    err = c.sftpClient.RemoveDirectory(root)
     f.putSftpConnection(&c, err)
     return err
 }
@@ -43,6 +43,20 @@ Above this size files will be chunked into a _segments container. The
 default for this is 5GB which is its maximum value.`,
         Default:  defaultChunkSize,
         Advanced: true,
+    }, {
+        Name: "no_chunk",
+        Help: `Don't chunk files during streaming upload.
+
+When doing streaming uploads (eg using rcat or mount) setting this
+flag will cause the swift backend to not upload chunked files.
+
+This will limit the maximum upload size to 5GB. However non chunked
+files are easier to deal with and have an MD5SUM.
+
+Rclone will still chunk files bigger than chunk_size when doing normal
+copy operations.`,
+        Default:  false,
+        Advanced: true,
     }}
 
 // Register with Fs
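Assuming the usual backend-option naming convention, the new option would surface as a --swift-no-chunk flag (or a no_chunk = true line in the remote's config section), for example:

    rclone rcat --swift-no-chunk remote:container/path/to/file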
@@ -175,6 +189,7 @@ type Options struct {
     StoragePolicy string        `config:"storage_policy"`
     EndpointType  string        `config:"endpoint_type"`
     ChunkSize     fs.SizeSuffix `config:"chunk_size"`
+    NoChunk       bool          `config:"no_chunk"`
 }
 
 // Fs represents a remote swift server
@@ -196,10 +211,13 @@ type Fs struct {
 //
 // Will definitely have info but maybe not meta
 type Object struct {
-    fs      *Fs           // what this object is part of
-    remote  string        // The remote path
-    info    swift.Object  // Info from the swift object if known
-    headers swift.Headers // The object headers if known
+    fs           *Fs    // what this object is part of
+    remote       string // The remote path
+    size         int64
+    lastModified time.Time
+    contentType  string
+    md5          string
+    headers      swift.Headers // The object headers if known
 }
 
 // ------------------------------------------------------------
@@ -432,7 +450,10 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
     }
     if info != nil {
         // Set info but not headers
-        o.info = *info
+        err := o.decodeMetaData(info)
+        if err != nil {
+            return nil, err
+        }
     } else {
         err := o.readMetaData() // reads info and headers, returning an error
         if err != nil {
@@ -829,7 +850,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
         fs.Debugf(o, "Returning empty Md5sum for swift large object")
         return "", nil
     }
-    return strings.ToLower(o.info.Hash), nil
+    return strings.ToLower(o.md5), nil
 }
 
 // hasHeader checks for the header passed in returning false if the
@@ -858,7 +879,22 @@ func (o *Object) isStaticLargeObject() (bool, error) {
 
 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-    return o.info.Bytes
+    return o.size
+}
+
+// decodeMetaData sets the metadata in the object from a swift.Object
+//
+// Sets
+//  o.lastModified
+//  o.size
+//  o.md5
+//  o.contentType
+func (o *Object) decodeMetaData(info *swift.Object) (err error) {
+    o.lastModified = info.LastModified
+    o.size = info.Bytes
+    o.md5 = info.Hash
+    o.contentType = info.ContentType
+    return nil
 }
 
 // readMetaData gets the metadata if it hasn't already been fetched
@@ -882,8 +918,11 @@ func (o *Object) readMetaData() (err error) {
         }
         return err
     }
-    o.info = info
     o.headers = h
+    err = o.decodeMetaData(&info)
+    if err != nil {
+        return err
+    }
     return nil
 }
@@ -894,17 +933,17 @@ func (o *Object) readMetaData() (err error) {
 // LastModified returned in the http headers
 func (o *Object) ModTime() time.Time {
     if fs.Config.UseServerModTime {
-        return o.info.LastModified
+        return o.lastModified
     }
     err := o.readMetaData()
     if err != nil {
         fs.Debugf(o, "Failed to read metadata: %s", err)
-        return o.info.LastModified
+        return o.lastModified
     }
     modTime, err := o.headers.ObjectMetadata().GetModTime()
     if err != nil {
         // fs.Logf(o, "Failed to read mtime from object: %v", err)
-        return o.info.LastModified
+        return o.lastModified
     }
     return modTime
 }
@@ -938,7 +977,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
 // It compares the Content-Type to directoryMarkerContentType - that
 // makes it a directory marker which is not storable.
 func (o *Object) Storable() bool {
-    return o.info.ContentType != directoryMarkerContentType
+    return o.contentType != directoryMarkerContentType
 }
 
 // Open an object for read
@@ -1105,20 +1144,31 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
     contentType := fs.MimeType(src)
     headers := m.ObjectHeaders()
     uniquePrefix := ""
-    if size > int64(o.fs.opt.ChunkSize) || size == -1 {
+    if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
         uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
         if err != nil {
             return err
         }
+        o.headers = nil // wipe old metadata
     } else {
-        headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
+        if size >= 0 {
+            headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
+        }
+        var rxHeaders swift.Headers
         err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-            _, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
+            rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
            return shouldRetry(err)
         })
         if err != nil {
             return err
         }
+        // set Metadata since ObjectPut checked the hash and length so we know the
+        // object has been safely uploaded
+        o.lastModified = modTime
+        o.size = size
+        o.md5 = rxHeaders["ETag"]
+        o.contentType = contentType
+        o.headers = headers
     }
 
     // If file was a dynamic large object then remove old/all segments
@@ -1129,8 +1179,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
         }
     }
 
-    // Read the metadata from the newly created object
-    o.headers = nil // wipe old metadata
+    // Read the metadata from the newly created object if necessary
     return o.readMetaData()
 }
@@ -1160,7 +1209,7 @@ func (o *Object) Remove() error {
 
 // MimeType of an Object if known, "" otherwise
 func (o *Object) MimeType() string {
-    return o.info.ContentType
+    return o.contentType
 }
 
 // Check the interfaces are satisfied
@@ -6,7 +6,10 @@ import (
     "regexp"
     "strconv"
     "strings"
+    "sync"
     "time"
+
+    "github.com/ncw/rclone/fs"
 )
 
 const (
@@ -148,6 +151,8 @@ var timeFormats = []string{
     time.RFC3339, // Wed, 31 Oct 2018 13:57:11 CET (as used by komfortcloud.de)
 }
 
+var oneTimeError sync.Once
+
 // UnmarshalXML turns XML into a Time
 func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
     var v string
@@ -171,5 +176,14 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
             break
         }
     }
+    if err != nil {
+        oneTimeError.Do(func() {
+            fs.Errorf(nil, "Failed to parse time %q - using the epoch", v)
+        })
+        // Return the epoch instead
+        *t = Time(time.Unix(0, 0))
+        // ignore error
+        err = nil
+    }
     return err
 }
@@ -601,10 +601,9 @@ func (f *Fs) mkParentDir(dirPath string) error {
     return f.mkdir(parent)
 }
 
-// mkdir makes the directory and parents using native paths
-func (f *Fs) mkdir(dirPath string) error {
-    // defer log.Trace(dirPath, "")("")
-    // We assume the root is already ceated
+// low level mkdir, only makes the directory, doesn't attempt to create parents
+func (f *Fs) _mkdir(dirPath string) error {
+    // We assume the root is already created
     if dirPath == "" {
         return nil
     }
@@ -617,20 +616,26 @@ func (f *Fs) mkdir(dirPath string) error {
         Path:       dirPath,
         NoResponse: true,
     }
-    err := f.pacer.Call(func() (bool, error) {
+    return f.pacer.Call(func() (bool, error) {
         resp, err := f.srv.Call(&opts)
         return shouldRetry(resp, err)
     })
+}
+
+// mkdir makes the directory and parents using native paths
+func (f *Fs) mkdir(dirPath string) error {
+    // defer log.Trace(dirPath, "")("")
+    err := f._mkdir(dirPath)
     if apiErr, ok := err.(*api.Error); ok {
         // already exists
         if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable {
             return nil
         }
-        // parent does not exists
+        // parent does not exist
         if apiErr.StatusCode == http.StatusConflict {
             err = f.mkParentDir(dirPath)
             if err == nil {
-                err = f.mkdir(dirPath)
+                err = f._mkdir(dirPath)
             }
         }
     }
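A sketch of how the split plays out when creating a/b/c where only a exists (assuming the server answers 409 Conflict for a missing parent and 405 Method Not Allowed for an existing directory, as the status checks above suggest):

    mkdir("a/b/c") -> _mkdir 409 Conflict
      mkParentDir("a/b/c") -> mkdir("a/b") -> _mkdir 409 Conflict
        mkParentDir("a/b") -> mkdir("a") -> _mkdir 405 (already exists, treated as success)
        retry _mkdir("a/b") -> created
      retry _mkdir("a/b/c") -> created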
cmd/cmd.go
@@ -51,7 +51,7 @@ var (
     errorCommandNotFound    = errors.New("command not found")
     errorUncategorized      = errors.New("uncategorized error")
     errorNotEnoughArguments = errors.New("not enough arguments")
-    errorTooManyArguents    = errors.New("too many arguments")
+    errorTooManyArguments   = errors.New("too many arguments")
 )
 
 const (
@@ -294,14 +294,12 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
 func CheckArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) {
     if len(args) < MinArgs {
         _ = cmd.Usage()
-        _, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum\n", cmd.Name(), MinArgs)
-        // os.Exit(1)
+        _, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum: you provided %d non flag arguments: %q\n", cmd.Name(), MinArgs, len(args), args)
         resolveExitCode(errorNotEnoughArguments)
     } else if len(args) > MaxArgs {
         _ = cmd.Usage()
-        _, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.Name(), MaxArgs)
-        // os.Exit(1)
-        resolveExitCode(errorTooManyArguents)
+        _, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum: you provided %d non flag arguments: %q\n", cmd.Name(), MaxArgs, len(args), args)
+        resolveExitCode(errorTooManyArguments)
     }
 }
@@ -51,6 +51,17 @@ written a trailing / - meaning "copy the contents of this directory".
|
||||
This applies to all commands and whether you are talking about the
|
||||
source or destination.
|
||||
|
||||
See the [--no-traverse](/docs/#no-traverse) option for controlling
|
||||
whether rclone lists the destination directory or not. Supplying this
|
||||
option when copying a small number of files into a large destination
|
||||
can speed transfers up greatly.
|
||||
|
||||
For example, if you have many files in /path/to/src but only a few of
|
||||
them change every day, you can to copy all the files which have
|
||||
changed recently very efficiently like this:
|
||||
|
||||
rclone copy --max-age 24h --no-traverse /path/to/src remote:
|
||||
|
||||
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
|
||||
@@ -7,7 +7,7 @@ package mountlib
 import (
     "log"
 
-    "github.com/sevlyar/go-daemon"
+    daemon "github.com/sevlyar/go-daemon"
 )
 
 func startBackgroundMode() bool {
@@ -37,6 +37,11 @@ into ` + "`dest:path`" + ` then delete the original (if no errors on copy) in
 
 If you want to delete empty source directories after move, use the --delete-empty-src-dirs flag.
 
+See the [--no-traverse](/docs/#no-traverse) option for controlling
+whether rclone lists the destination directory or not. Supplying this
+option when moving a small number of files into a large destination
+can speed transfers up greatly.
+
 **Important**: Since this can cause data loss, test first with the
 --dry-run flag.
@@ -27,6 +27,11 @@ const (
 //
 // It returns a func which should be called to stop the stats.
 func startProgress() func() {
+    err := initTerminal()
+    if err != nil {
+        fs.Errorf(nil, "Failed to start progress: %v", err)
+        return func() {}
+    }
     stopStats := make(chan struct{})
     oldLogPrint := fs.LogPrint
     if !log.Redirected() {
@@ -4,6 +4,10 @@ package cmd
 
 import "os"
 
+func initTerminal() error {
+    return nil
+}
+
 func writeToTerminal(b []byte) {
     _, _ = os.Stdout.Write(b)
 }
@@ -5,22 +5,31 @@ package cmd
 import (
     "fmt"
     "os"
-    "sync"
+    "syscall"
 
     ansiterm "github.com/Azure/go-ansiterm"
     "github.com/Azure/go-ansiterm/winterm"
+    "github.com/pkg/errors"
 )
 
 var (
-    initAnsiParser sync.Once
-    ansiParser     *ansiterm.AnsiParser
+    ansiParser *ansiterm.AnsiParser
 )
 
+func initTerminal() error {
+    winEventHandler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
+    if winEventHandler == nil {
+        err := syscall.GetLastError()
+        if err == nil {
+            err = errors.New("initialization failed")
+        }
+        return errors.Wrap(err, "windows terminal")
+    }
+    ansiParser = ansiterm.CreateParser("Ground", winEventHandler)
+    return nil
+}
+
 func writeToTerminal(b []byte) {
-    initAnsiParser.Do(func() {
-        winEventHandler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
-        ansiParser = ansiterm.CreateParser("Ground", winEventHandler)
-    })
     // Remove all non-ASCII characters until this is fixed
     // https://github.com/Azure/go-ansiterm/issues/26
     r := []rune(string(b))
@@ -126,7 +126,7 @@ func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
	}

	// Make the entries for display
	directory := serve.NewDirectory(dirRemote)
	directory := serve.NewDirectory(dirRemote, s.HTMLTemplate)
	for _, node := range dirEntries {
		directory.AddEntry(node.Path(), node.IsDir())
	}
@@ -4,14 +4,18 @@ package httplib

import (
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"fmt"
	"html/template"
	"io/ioutil"
	"log"
	"net"
	"net/http"
	"strings"
	"time"

	auth "github.com/abbot/go-http-auth"
	"github.com/ncw/rclone/cmd/serve/httplib/serve/data"
	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
)

@@ -105,8 +109,9 @@ type Server struct {
	waitChan        chan struct{} // for waiting on the listener to close
	httpServer      *http.Server
	basicPassHashed string
	useSSL          bool               // if server is configured for SSL/TLS
	usingAuth       bool               // set if authentication is configured
	HTMLTemplate    *template.Template // HTML template for web interface
}

// singleUserProvider provides the encrypted password for a single user

@@ -143,7 +148,28 @@ func NewServer(handler http.Handler, opt *Options) *Server {
		secretProvider = s.singleUserProvider
	}
	authenticator := auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
	handler = auth.JustCheck(authenticator, handler.ServeHTTP)
	oldHandler := handler
	handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if username := authenticator.CheckAuth(r); username == "" {
			authHeader := r.Header.Get(authenticator.Headers.V().Authorization)
			if authHeader != "" {
				s := strings.SplitN(authHeader, " ", 2)
				var userName = "UNKNOWN"
				if len(s) == 2 && s[0] == "Basic" {
					b, err := base64.StdEncoding.DecodeString(s[1])
					if err == nil {
						userName = strings.SplitN(string(b), ":", 2)[0]
					}
				}
				fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, userName)
			} else {
				fs.Infof(r.URL.Path, "%s: Basic auth challenge sent", r.RemoteAddr)
			}
			authenticator.RequireAuth(w, r)
		} else {
			oldHandler.ServeHTTP(w, r)
		}
	})
	s.usingAuth = true
}
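For reference, the header this wrapper inspects is the standard HTTP Basic scheme, `Authorization: Basic base64(user:pass)`. A hypothetical request (user "alice", password "secret") would carry:

    Authorization: Basic YWxpY2U6c2VjcmV0

which is why the code splits on the first space, checks for the `Basic` prefix, base64-decodes the remainder, and then splits on the first `:` to recover just the user name for logging.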
@@ -182,6 +208,12 @@ func NewServer(handler http.Handler, opt *Options) *Server {
		s.httpServer.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
	}

	htmlTemplate, templateErr := data.GetTemplate()
	if templateErr != nil {
		log.Fatalf(templateErr.Error())
	}
	s.HTMLTemplate = htmlTemplate

	return s
}
22  cmd/serve/httplib/serve/data/assets_generate.go  Normal file

@@ -0,0 +1,22 @@
// +build ignore

package main

import (
	"log"
	"net/http"

	"github.com/shurcooL/vfsgen"
)

func main() {
	var AssetDir http.FileSystem = http.Dir("./templates")
	err := vfsgen.Generate(AssetDir, vfsgen.Options{
		PackageName:  "data",
		BuildTags:    "!dev",
		VariableName: "Assets",
	})
	if err != nil {
		log.Fatalln(err)
	}
}
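The `// +build ignore` tag keeps this generator out of normal builds; it runs via the `go:generate` directive in data.go below. A typical invocation, assuming you are at the repository root:

    go generate ./cmd/serve/httplib/serve/data

which regenerates assets_vfsdata.go from the contents of the templates directory.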
186  cmd/serve/httplib/serve/data/assets_vfsdata.go  Normal file

@@ -0,0 +1,186 @@
// Code generated by vfsgen; DO NOT EDIT.

// +build !dev

package data

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	pathpkg "path"
	"time"
)

// Assets statically implements the virtual filesystem provided to vfsgen.
var Assets = func() http.FileSystem {
	fs := vfsgen۰FS{
		"/": &vfsgen۰DirInfo{
			name:    "/",
			modTime: time.Date(2018, 12, 16, 6, 54, 42, 894445775, time.UTC),
		},
		"/index.html": &vfsgen۰CompressedFileInfo{
			name:             "index.html",
			modTime:          time.Date(2018, 12, 16, 6, 54, 42, 790442328, time.UTC),
			uncompressedSize: 226,

			compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x8f\x31\xcf\x83\x20\x10\x86\x77\x7e\xc5\x7d\xc4\xf5\x93\xb8\x35\x0d\xb0\xb4\x6e\x26\x6d\x1a\x3b\x74\x3c\xeb\x29\x24\x4a\x13\xa4\x43\x43\xf8\xef\x0d\xea\xd4\x09\xee\x79\xef\x9e\xcb\xc9\xbf\xf3\xe5\xd4\x3e\xae\x35\x98\x30\x4f\x9a\xc9\xfc\xc0\x84\x6e\x54\x9c\x1c\xcf\x80\xb0\xd7\x4c\xce\x14\x10\x9e\x06\xfd\x42\x41\xf1\x77\x18\xfe\x0f\x39\x0d\x36\x4c\xa4\x63\x84\xb2\xcd\x3f\x48\x49\x8a\x8d\x31\x29\xf6\xd1\xee\xd5\x7f\xb2\xa8\xfa\xe9\x33\x95\x66\x31\x82\x47\x37\x12\x14\x16\x8e\x0a\xca\xda\x05\x6f\x69\xc9\x39\x82\xf1\x34\x28\x1e\x23\x14\xb6\xbc\xdf\x1a\x48\x89\xeb\xad\x6a\x08\x87\xd5\x81\x5a\x76\x1e\xc4\x2a\x22\xd7\xaf\x6c\xdf\x27\xb6\x8b\xbe\x01\x00\x00\xff\xff\x92\x2e\x35\x75\xe2\x00\x00\x00"),
		},
	}
	fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{
		fs["/index.html"].(os.FileInfo),
	}

	return fs
}()

type vfsgen۰FS map[string]interface{}

func (fs vfsgen۰FS) Open(path string) (http.File, error) {
	path = pathpkg.Clean("/" + path)
	f, ok := fs[path]
	if !ok {
		return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist}
	}

	switch f := f.(type) {
	case *vfsgen۰CompressedFileInfo:
		gr, err := gzip.NewReader(bytes.NewReader(f.compressedContent))
		if err != nil {
			// This should never happen because we generate the gzip bytes such that they are always valid.
			panic("unexpected error reading own gzip compressed bytes: " + err.Error())
		}
		return &vfsgen۰CompressedFile{
			vfsgen۰CompressedFileInfo: f,
			gr:                        gr,
		}, nil
	case *vfsgen۰DirInfo:
		return &vfsgen۰Dir{
			vfsgen۰DirInfo: f,
		}, nil
	default:
		// This should never happen because we generate only the above types.
		panic(fmt.Sprintf("unexpected type %T", f))
	}
}

// vfsgen۰CompressedFileInfo is a static definition of a gzip compressed file.
type vfsgen۰CompressedFileInfo struct {
	name              string
	modTime           time.Time
	compressedContent []byte
	uncompressedSize  int64
}

func (f *vfsgen۰CompressedFileInfo) Readdir(count int) ([]os.FileInfo, error) {
	return nil, fmt.Errorf("cannot Readdir from file %s", f.name)
}
func (f *vfsgen۰CompressedFileInfo) Stat() (os.FileInfo, error) { return f, nil }

func (f *vfsgen۰CompressedFileInfo) GzipBytes() []byte {
	return f.compressedContent
}

func (f *vfsgen۰CompressedFileInfo) Name() string       { return f.name }
func (f *vfsgen۰CompressedFileInfo) Size() int64        { return f.uncompressedSize }
func (f *vfsgen۰CompressedFileInfo) Mode() os.FileMode  { return 0444 }
func (f *vfsgen۰CompressedFileInfo) ModTime() time.Time { return f.modTime }
func (f *vfsgen۰CompressedFileInfo) IsDir() bool        { return false }
func (f *vfsgen۰CompressedFileInfo) Sys() interface{}   { return nil }

// vfsgen۰CompressedFile is an opened compressedFile instance.
type vfsgen۰CompressedFile struct {
	*vfsgen۰CompressedFileInfo
	gr      *gzip.Reader
	grPos   int64 // Actual gr uncompressed position.
	seekPos int64 // Seek uncompressed position.
}

func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) {
	if f.grPos > f.seekPos {
		// Rewind to beginning.
		err = f.gr.Reset(bytes.NewReader(f.compressedContent))
		if err != nil {
			return 0, err
		}
		f.grPos = 0
	}
	if f.grPos < f.seekPos {
		// Fast-forward.
		_, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
		if err != nil {
			return 0, err
		}
		f.grPos = f.seekPos
	}
	n, err = f.gr.Read(p)
	f.grPos += int64(n)
	f.seekPos = f.grPos
	return n, err
}
func (f *vfsgen۰CompressedFile) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
		f.seekPos = 0 + offset
	case io.SeekCurrent:
		f.seekPos += offset
	case io.SeekEnd:
		f.seekPos = f.uncompressedSize + offset
	default:
		panic(fmt.Errorf("invalid whence value: %v", whence))
	}
	return f.seekPos, nil
}
func (f *vfsgen۰CompressedFile) Close() error {
	return f.gr.Close()
}

// vfsgen۰DirInfo is a static definition of a directory.
type vfsgen۰DirInfo struct {
	name    string
	modTime time.Time
	entries []os.FileInfo
}

func (d *vfsgen۰DirInfo) Read([]byte) (int, error) {
	return 0, fmt.Errorf("cannot Read from directory %s", d.name)
}
func (d *vfsgen۰DirInfo) Close() error               { return nil }
func (d *vfsgen۰DirInfo) Stat() (os.FileInfo, error) { return d, nil }

func (d *vfsgen۰DirInfo) Name() string       { return d.name }
func (d *vfsgen۰DirInfo) Size() int64        { return 0 }
func (d *vfsgen۰DirInfo) Mode() os.FileMode  { return 0755 | os.ModeDir }
func (d *vfsgen۰DirInfo) ModTime() time.Time { return d.modTime }
func (d *vfsgen۰DirInfo) IsDir() bool        { return true }
func (d *vfsgen۰DirInfo) Sys() interface{}   { return nil }

// vfsgen۰Dir is an opened dir instance.
type vfsgen۰Dir struct {
	*vfsgen۰DirInfo
	pos int // Position within entries for Seek and Readdir.
}

func (d *vfsgen۰Dir) Seek(offset int64, whence int) (int64, error) {
	if offset == 0 && whence == io.SeekStart {
		d.pos = 0
		return 0, nil
	}
	return 0, fmt.Errorf("unsupported Seek in directory %s", d.name)
}

func (d *vfsgen۰Dir) Readdir(count int) ([]os.FileInfo, error) {
	if d.pos >= len(d.entries) && count > 0 {
		return nil, io.EOF
	}
	if count <= 0 || count > len(d.entries)-d.pos {
		count = len(d.entries) - d.pos
	}
	e := d.entries[d.pos : d.pos+count]
	d.pos += count
	return e, nil
}
36  cmd/serve/httplib/serve/data/data.go  Normal file

@@ -0,0 +1,36 @@
//go:generate go run assets_generate.go
// The "go:generate" directive compiles static assets by running assets_generate.go

package data

import (
	"html/template"
	"io/ioutil"

	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
)

// GetTemplate returns the HTML template for serving directories via HTTP
func GetTemplate() (tpl *template.Template, err error) {
	templateFile, err := Assets.Open("index.html")
	if err != nil {
		return nil, errors.Wrap(err, "get template open")
	}

	defer fs.CheckClose(templateFile, &err)

	templateBytes, err := ioutil.ReadAll(templateFile)
	if err != nil {
		return nil, errors.Wrap(err, "get template read")
	}

	var templateString = string(templateBytes)

	tpl, err = template.New("index").Parse(templateString)
	if err != nil {
		return nil, errors.Wrap(err, "get template parse")
	}

	return
}
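A minimal usage sketch, assuming `log` and `os` are imported and `dir` is any value exposing the `.Title` and `.Entries` fields the template ranges over (such as the serve package's Directory):

	tpl, err := data.GetTemplate()
	if err != nil {
		log.Fatal(err)
	}
	// render the directory listing to stdout instead of an http.ResponseWriter
	err = tpl.Execute(os.Stdout, dir)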
11  cmd/serve/httplib/serve/data/templates/index.html  Normal file

@@ -0,0 +1,11 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ .Title }}</title>
</head>
<body>
<h1>{{ .Title }}</h1>
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
{{ end }}</body>
</html>
@@ -21,17 +21,19 @@ type DirEntry struct {

// Directory represents a directory
type Directory struct {
	DirRemote    string
	Title        string
	Entries      []DirEntry
	Query        string
	HTMLTemplate *template.Template
}

// NewDirectory makes an empty Directory
func NewDirectory(dirRemote string) *Directory {
func NewDirectory(dirRemote string, htmlTemplate *template.Template) *Directory {
	d := &Directory{
		DirRemote:    dirRemote,
		Title:        fmt.Sprintf("Directory listing of /%s", dirRemote),
		HTMLTemplate: htmlTemplate,
	}
	return d
}

@@ -77,26 +79,10 @@ func (d *Directory) Serve(w http.ResponseWriter, r *http.Request) {
	defer accounting.Stats.DoneTransferring(d.DirRemote, true)

	fs.Infof(d.DirRemote, "%s: Serving directory", r.RemoteAddr)
	err := indexTemplate.Execute(w, d)
	err := d.HTMLTemplate.Execute(w, d)
	if err != nil {
		Error(d.DirRemote, w, "Failed to render template", err)
		return
	}
}

// indexPage is a directory listing template
var indexPage = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ .Title }}</title>
</head>
<body>
<h1>{{ .Title }}</h1>
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
{{ end }}</body>
</html>
`

// indexTemplate is the instantiated indexPage
var indexTemplate = template.Must(template.New("index").Parse(indexPage))
@@ -2,23 +2,32 @@ package serve

import (
	"errors"
	"html/template"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"net/url"
	"testing"

	"github.com/ncw/rclone/cmd/serve/httplib/serve/data"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func GetTemplate(t *testing.T) *template.Template {
	htmlTemplate, err := data.GetTemplate()
	require.NoError(t, err)
	return htmlTemplate
}

func TestNewDirectory(t *testing.T) {
	d := NewDirectory("z")
	d := NewDirectory("z", GetTemplate(t))
	assert.Equal(t, "z", d.DirRemote)
	assert.Equal(t, "Directory listing of /z", d.Title)
}

func TestSetQuery(t *testing.T) {
	d := NewDirectory("z")
	d := NewDirectory("z", GetTemplate(t))
	assert.Equal(t, "", d.Query)
	d.SetQuery(url.Values{"potato": []string{"42"}})
	assert.Equal(t, "?potato=42", d.Query)

@@ -27,7 +36,7 @@ func TestSetQuery(t *testing.T) {
}

func TestAddEntry(t *testing.T) {
	var d = NewDirectory("z")
	var d = NewDirectory("z", GetTemplate(t))
	d.AddEntry("", true)
	d.AddEntry("dir", true)
	d.AddEntry("a/b/c/d.txt", false)

@@ -42,7 +51,7 @@ func TestAddEntry(t *testing.T) {
	}, d.Entries)

	// Now test with a query parameter
	d = NewDirectory("z").SetQuery(url.Values{"potato": []string{"42"}})
	d = NewDirectory("z", GetTemplate(t)).SetQuery(url.Values{"potato": []string{"42"}})
	d.AddEntry("file", false)
	d.AddEntry("dir", true)
	assert.Equal(t, []DirEntry{

@@ -62,7 +71,7 @@ func TestError(t *testing.T) {
}

func TestServe(t *testing.T) {
	d := NewDirectory("aDirectory")
	d := NewDirectory("aDirectory", GetTemplate(t))
	d.AddEntry("file", false)
	d.AddEntry("dir", true)
@@ -1,4 +1,7 @@
// Package restic serves a remote suitable for use with restic

// +build go1.9

package restic

import (

@@ -1,3 +1,5 @@
// +build go1.9

package restic

import (

@@ -1,5 +1,8 @@
// Serve restic tests set up a server and run the integration tests
// for restic against it.

// +build go1.9

package restic

import (

11  cmd/serve/restic/restic_unsupported.go  Normal file

@@ -0,0 +1,11 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.9

package restic

import "github.com/spf13/cobra"

// Command definition is nil to show not implemented
var Command *cobra.Command = nil
@@ -1,3 +1,5 @@
// +build go1.9

package restic

import (

@@ -1,4 +1,4 @@
//+build !go1.10
//+build go1.9,!go1.10

// Fallback deadline setting for pre go1.10

@@ -13,8 +13,12 @@ import (

func init() {
	Command.AddCommand(http.Command)
	Command.AddCommand(webdav.Command)
	Command.AddCommand(restic.Command)
	if webdav.Command != nil {
		Command.AddCommand(webdav.Command)
	}
	if restic.Command != nil {
		Command.AddCommand(restic.Command)
	}
	if ftp.Command != nil {
		Command.AddCommand(ftp.Command)
	}
@@ -1,3 +1,5 @@
//+build go1.9

package webdav

import (

@@ -3,7 +3,7 @@
//
// We skip tests on platforms with troublesome character mappings

//+build !windows,!darwin
//+build !windows,!darwin,go1.9

package webdav

11  cmd/serve/webdav/webdav_unsupported.go  Normal file

@@ -0,0 +1,11 @@
// Build for webdav for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.9

package webdav

import "github.com/spf13/cobra"

// Command definition is nil to show not implemented
var Command *cobra.Command = nil
@@ -154,7 +154,7 @@ Contributors

* Michael P. Dubner <pywebmail@list.ru>
* Antoine GIRARD <sapk@users.noreply.github.com>
* Mateusz Piotrowski <mpp302@gmail.com>
* Animosity022 <animosity22@users.noreply.github.com>
* Animosity022 <animosity22@users.noreply.github.com> <earl.texter@gmail.com>
* Peter Baumgartner <pete@lincolnloop.com>
* Craig Rachel <craig@craigrachel.com>
* Michael G. Noll <miguno@users.noreply.github.com>

@@ -217,3 +217,11 @@ Contributors

* Peter Kaminski <kaminski@istori.com>
* Henry Ptasinski <henry@logout.com>
* Alexander <kharkovalexander@gmail.com>
* Garry McNulty <garrmcnu@gmail.com>
* Mathieu Carbou <mathieu.carbou@gmail.com>
* Mark Otway <mark@otway.com>
* William Cocker <37018962+WilliamCocker@users.noreply.github.com>
* François Leurent <131.js@cloudyks.org>
* Arkadius Stefanski <arkste@gmail.com>
* Jay <dev@jaygoel.com>
* andrea rota <a@xelera.eu>
@@ -98,7 +98,8 @@ excess files in the bucket.

B2 supports multiple [Application Keys for different access permission
to B2 Buckets](https://www.backblaze.com/b2/docs/application_keys.html).

You can use these with rclone too.
You can use these with rclone too; you will need to use rclone version 1.43
or later.

Follow Backblaze's docs to create an Application Key with the required
permission and add the `Application Key ID` as the `account` and the

@@ -181,8 +182,8 @@ versions of files, leaving the current ones intact. You can also
supply a path and only old versions under that path will be deleted,
eg `rclone cleanup remote:bucket/path/to/stuff`.

Note that `cleanup` does not remove partially uploaded files
from the bucket.
Note that `cleanup` will remove partially uploaded files from the bucket
if they are more than a day old.

When you `purge` a bucket, the current and the old versions will be
deleted then the bucket will be deleted.
@@ -267,6 +267,15 @@ Options

Rclone has a number of options to control its behaviour.

Options that take parameters can have the values passed in two ways,
`--option=value` or `--option value`. However boolean (true/false)
options behave slightly differently to the other options in that
`--boolean` sets the option to `true` and the absence of the flag sets
it to `false`. It is also possible to specify `--boolean=false` or
`--boolean=true`. Note that `--boolean false` is not valid - this is
parsed as `--boolean` and the `false` is parsed as an extra command
line argument for rclone.
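To make this concrete, here is the behaviour with the real boolean flag `--checksum` (command lines are illustrative):

    rclone sync --checksum /src remote:dst           # checksum checking on
    rclone sync --checksum=false /src remote:dst     # explicitly off
    rclone sync --checksum false /src remote:dst     # wrong: "false" becomes an extra argument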
Options which use TIME use the go time parser. A duration string is a
possibly signed sequence of decimal numbers, each with optional
fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid

@@ -842,8 +851,8 @@ will fall back to the default behaviour and log an error level message
to the console. Note: Encrypted destinations are not supported
by `--track-renames`.

Note that `--track-renames` uses extra memory to keep track of all
the rename candidates.
Note that `--track-renames` is incompatible with `--no-traverse` and
that it uses extra memory to keep track of all the rename candidates.

Note also that `--track-renames` is incompatible with
`--delete-before` and will select `--delete-after` instead of
@@ -1132,6 +1141,24 @@ This option defaults to `false`.

**This should be used only for testing.**

### --no-traverse ###

The `--no-traverse` flag controls whether the destination file system
is traversed when using the `copy` or `move` commands.
`--no-traverse` is not compatible with `sync` and will be ignored if
you supply it with `sync`.

If you are only copying a small number of files (or are filtering most
of the files) and/or have a large number of files on the destination
then `--no-traverse` will stop rclone listing the destination and save
time.

However, if you are copying a large number of files, especially if you
are doing a copy where lots of the files under consideration haven't
changed and won't need copying then you shouldn't use `--no-traverse`.

See [rclone copy](/commands/rclone_copy/) for an example of how to use it.

Filtering
---------
@@ -845,9 +845,7 @@ second that each client_id can do set by Google. rclone already has a
high quota and I will continue to make sure it is high enough by
contacting Google.

However you might find you get better performance making your own
client_id if you are a heavy user. Or you may not depending on exactly
how Google have been raising rclone's rate limit.
It is strongly recommended to use your own client ID, as the default rclone ID is heavily used. If you have multiple services running, it is recommended to use an API key for each service. The default Google quota is 10 transactions per second, so it is best to stay under that number; exceeding it will cause rclone to be rate limited and slow things down.

Here is how to create your own Google Drive client ID for rclone:
@@ -10,7 +10,7 @@

set -e

#when adding a tool to the list make sure to also add its corresponding command further in the script
unzip_tools_list=('unzip' '7z', 'busybox')
unzip_tools_list=('unzip' '7z' 'busybox')

usage() { echo "Usage: curl https://rclone.org/install.sh | sudo bash [-s beta]" 1>&2; exit 1; }
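The stray comma mattered here: bash array elements are separated by whitespace, not commas, so the comma silently became part of an element. A minimal sketch of the failure mode:

    arr=('unzip' '7z', 'busybox')
    echo "${arr[1]}"    # prints "7z," - the comma travels with the element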
@@ -242,13 +242,17 @@ platforms they are common. Rclone will map these names to and from an
identical looking unicode equivalent. For example, if a file has a `?`
in it, it will be mapped to `?` instead.

The largest allowed file size is 10GiB (10,737,418,240 bytes).
The largest allowed file sizes are 15GB for OneDrive for Business and 35GB for OneDrive Personal (Updated 4 Jan 2019).

The entire path, including the file name, must contain fewer than 400 characters for OneDrive, OneDrive for Business and SharePoint Online. If you are encrypting file and folder names with rclone, you may want to pay attention to this limitation because the encrypted names are typically longer than the original ones.

OneDrive seems to be OK with at least 50,000 files in a folder, but at
100,000 rclone will get errors listing the directory like `couldn’t
list files: UnknownError:`. See
[#2707](https://github.com/ncw/rclone/issues/2707) for more info.

An official document about the limitations for different types of OneDrive can be found [here](https://support.office.com/en-us/article/invalid-file-names-and-file-types-in-onedrive-onedrive-for-business-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa).

### Versioning issue ###

Every change in OneDrive causes the service to create a new version.

@@ -260,6 +264,16 @@ The `copy` is the only rclone command affected by this as we copy
the file and then afterwards set the modification time to match the
source file.

**Note**: Starting October 2018, users will no longer be able to disable versioning by default. This is because Microsoft has brought an [update](https://techcommunity.microsoft.com/t5/Microsoft-OneDrive-Blog/New-Updates-to-OneDrive-and-SharePoint-Team-Site-Versioning/ba-p/204390) to the mechanism. To change this new default setting, a PowerShell command is required to be run by a SharePoint admin. If you are an admin, you can run these commands in PowerShell to change that setting:

1. `Install-Module -Name Microsoft.Online.SharePoint.PowerShell` (in case you haven't installed this already)
1. `Import-Module Microsoft.Online.SharePoint.PowerShell -DisableNameChecking`
1. `Connect-SPOService -Url https://YOURSITE-admin.sharepoint.com -Credential YOU@YOURSITE.COM` (replacing `YOURSITE`, `YOU`, `YOURSITE.COM` with the actual values; this will prompt for your credentials)
1. `Set-SPOTenant -EnableMinimumVersionRequirement $False`
1. `Disconnect-SPOService` (to disconnect from the server)

*Below are the steps for normal users to disable versioning. If you don't see the "No Versioning" option, make sure the above requirements are met.*

User [Weropol](https://github.com/Weropol) has found a method to disable
versioning on OneDrive
@@ -217,6 +217,8 @@ Choose a number from below, or type in your own value
   \ "STANDARD_IA"
 5 / One Zone Infrequent Access storage class
   \ "ONEZONE_IA"
 6 / Glacier storage class
   \ "GLACIER"
storage_class> 1
Remote config
--------------------
@@ -266,8 +268,33 @@ The modified time is stored as metadata on the object as

### Multipart uploads ###

rclone supports multipart uploads with S3 which means that it can
upload files bigger than 5GB. Note that files uploaded *both* with
multipart upload *and* through crypt remotes do not have MD5 sums.
upload files bigger than 5GB.

Note that files uploaded *both* with multipart upload *and* through
crypt remotes do not have MD5 sums.

Rclone switches from single part uploads to multipart uploads at the
point specified by `--s3-upload-cutoff`. This can be a maximum of 5GB
and a minimum of 0 (ie always upload multipart files).

The chunk sizes used in the multipart upload are specified by
`--s3-chunk-size` and the number of chunks uploaded concurrently is
specified by `--s3-upload-concurrency`.

Multipart uploads will use `--transfers` * `--s3-upload-concurrency` *
`--s3-chunk-size` extra memory. Single part uploads do not use extra
memory.
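As a worked example, with the defaults in this change (`--s3-upload-concurrency` 4, `--s3-chunk-size` 5M) and assuming the global `--transfers` default of 4, multipart uploads buffer roughly 4 × 4 × 5M = 80M of extra memory.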
Single part transfers can be faster than multipart transfers or slower
depending on your latency from S3 - the more latency, the more likely
single part transfers will be faster.

Increasing `--s3-upload-concurrency` will increase throughput (8 would
be a sensible value) and increasing `--s3-chunk-size` also increases
throughput (16M would be sensible). Increasing either of these will
use more memory. The default values are high enough to gain most of
the possible performance without using too much memory.

### Buckets and Regions ###
@@ -361,9 +388,9 @@ A proper fix is being worked on in [issue #1824](https://github.com/ncw/rclone/issues/1824)

### Glacier ###

You can transition objects to glacier storage using a [lifecycle policy](http://docs.aws.amazon.com/AmazonS3/latest/user-guide/create-lifecycle.html).
You can upload objects using the glacier storage class or transition them to glacier using a [lifecycle policy](http://docs.aws.amazon.com/AmazonS3/latest/user-guide/create-lifecycle.html).
The bucket can still be synced or copied into normally, but if rclone
tries to access the data you will see an error like below.
tries to access data from the glacier storage class you will see an error like below.

    2017/09/11 19:07:43 Failed to sync: failed to open source object: Object in GLACIER, restore first: path/to/file
@@ -827,17 +854,31 @@ The storage class to use when storing new objects in S3.
    - Standard Infrequent Access storage class
- "ONEZONE_IA"
    - One Zone Infrequent Access storage class
- "GLACIER"
    - GLACIER storage class

### Advanced Options

Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)).

#### --s3-upload-cutoff

Cutoff for switching to chunked upload

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.

- Config: upload_cutoff
- Env Var: RCLONE_S3_UPLOAD_CUTOFF
- Type: SizeSuffix
- Default: 200M

#### --s3-chunk-size

Chunk size to use for uploading.

Any files larger than this will be uploaded in chunks of this
size. The default is 5MB. The minimum is 5MB.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.

Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.

@@ -882,7 +923,7 @@ this may help to speed up the transfers.

- Config: upload_concurrency
- Env Var: RCLONE_S3_UPLOAD_CONCURRENCY
- Type: int
- Default: 2
- Default: 4

#### --s3-force-path-style
@@ -127,6 +127,19 @@ does not take any path arguments.

To view your current quota you can use the `rclone about remote:`
command which will display your usage limit (quota) and the current usage.

### Limitations ###

When uploading very large files (bigger than about 5GB) you will need
to increase the `--timeout` parameter. This is because Yandex pauses
(perhaps to calculate the MD5SUM for the entire file) before returning
confirmation that the file has been uploaded. The default handling of
timeouts in rclone is to assume a 5 minute pause is an error and close
the connection - you'll see `net/http: timeout awaiting response
headers` errors in the logs if this is happening. Setting the timeout
to twice the max size of file in GB should be enough, so if you want
to upload a 30GB file set a timeout of `2 * 30 = 60m`, that is
`--timeout 60m`.

<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/yandex/yandex.go then run make backenddocs -->
### Standard Options
@@ -62,6 +62,7 @@ type ConfigInfo struct {
	MaxDepth        int
	IgnoreSize      bool
	IgnoreChecksum  bool
	NoTraverse      bool
	NoUpdateModTime bool
	DataRateUnit    string
	BackupDir       string
@@ -575,6 +575,17 @@ func SetValueAndSave(name, key, value string) (err error) {
	return nil
}

// FileGetFresh reads the config key under section, returning the value or
// an error if the config file was not found or that value couldn't be
// read.
func FileGetFresh(section, key string) (value string, err error) {
	reloadedConfigFile, err := loadConfigFile()
	if err != nil {
		return "", err
	}
	return reloadedConfigFile.GetValue(section, key)
}

// ShowRemotes shows an overview of the config file
func ShowRemotes() {
	remotes := getConfigData().GetSectionList()
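A quick usage sketch (the remote name and key are hypothetical): the point of FileGetFresh is that it rereads the config file from disk instead of serving the in-memory cached copy, which matters when another process may have rewritten the file.

	token, err := config.FileGetFresh("myremote", "token")
	if err != nil {
		// the config file could not be loaded or the key is missing
		return err
	}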
@@ -27,7 +27,6 @@ var (
	deleteAfter     bool
	bindAddr        string
	disableFeatures string
	noTraverse      bool
)

// AddFlags adds the non filing system specific flags to the command

@@ -65,7 +64,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
	flags.IntVarP(flagSet, &fs.Config.MaxDepth, "max-depth", "", fs.Config.MaxDepth, "If set limits the recursion depth to this.")
	flags.BoolVarP(flagSet, &fs.Config.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.")
	flags.BoolVarP(flagSet, &fs.Config.IgnoreChecksum, "ignore-checksum", "", fs.Config.IgnoreChecksum, "Skip post copy check of checksums.")
	flags.BoolVarP(flagSet, &noTraverse, "no-traverse", "", noTraverse, "Obsolete - does nothing.")
	flags.BoolVarP(flagSet, &fs.Config.NoTraverse, "no-traverse", "", fs.Config.NoTraverse, "Don't traverse destination file system on copy.")
	flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
	flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
	flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix for use with --backup-dir.")

@@ -113,10 +112,6 @@ func SetFlags() {
	}
}

	if noTraverse {
		fs.Logf(nil, "--no-traverse is obsolete and no longer needed - please remove")
	}

	if dumpHeaders {
		fs.Config.Dump |= fs.DumpHeaders
		fs.Logf(nil, "--dump-headers is obsolete - please use --dump headers instead")
@@ -13,6 +13,7 @@ import (

	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
)

// Active is the globally active filter

@@ -511,17 +512,33 @@ func (f *Filter) MakeListR(NewObject func(remote string) (fs.Object, error)) fs.
		if !f.HaveFilesFrom() {
			return errFilesFromNotSet
		}
		var entries fs.DirEntries
		for remote := range f.files {
			entry, err := NewObject(remote)
			if err == fs.ErrorObjectNotFound {
				// Skip files that are not found
			} else if err != nil {
				return err
			} else {
				entries = append(entries, entry)
			}
		}
		var (
			remotes = make(chan string, fs.Config.Checkers)
			g       errgroup.Group
		)
		for i := 0; i < fs.Config.Checkers; i++ {
			g.Go(func() (err error) {
				var entries = make(fs.DirEntries, 1)
				for remote := range remotes {
					entries[0], err = NewObject(remote)
					if err == fs.ErrorObjectNotFound {
						// Skip files that are not found
					} else if err != nil {
						return err
					} else {
						err = callback(entries)
						if err != nil {
							return err
						}
					}
				}
				return nil
			})
		}
		return callback(entries)
		for remote := range f.files {
			remotes <- remote
		}
		close(remotes)
		return g.Wait()
	}
}
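The rewrite above is a standard bounded worker pool built on golang.org/x/sync/errgroup: a fixed number of goroutines drain a channel of work, and g.Wait() reports the first error. A stripped-down sketch of the same pattern (illustrative only; `process` and `items` are hypothetical):

	var g errgroup.Group
	work := make(chan string)
	for i := 0; i < 4; i++ {
		g.Go(func() error {
			for item := range work {
				if err := process(item); err != nil {
					return err // g.Wait() returns the first failure
				}
			}
			return nil
		})
	}
	for _, item := range items {
		work <- item
	}
	close(work) // lets the workers' range loops finish
	err := g.Wait()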
@@ -5,6 +5,7 @@ import (
	"io/ioutil"
	"os"
	"strings"
	"sync"
	"testing"
	"time"

@@ -220,7 +221,10 @@ func TestNewFilterMakeListR(t *testing.T) {

	// NewObject function for MakeListR
	newObjects := FilesMap{}
	var newObjectMu sync.Mutex
	NewObject := func(remote string) (fs.Object, error) {
		newObjectMu.Lock()
		defer newObjectMu.Unlock()
		if remote == "notfound" {
			return nil, fs.ErrorObjectNotFound
		} else if remote == "error" {

@@ -233,7 +237,10 @@ func TestNewFilterMakeListR(t *testing.T) {

	// Callback for ListRFn
	listRObjects := FilesMap{}
	var callbackMu sync.Mutex
	listRcallback := func(entries fs.DirEntries) error {
		callbackMu.Lock()
		defer callbackMu.Unlock()
		for _, entry := range entries {
			listRObjects[entry.Remote()] = struct{}{}
		}
@@ -16,14 +16,17 @@ import (
)

// March holds the data used to traverse two Fs simultaneously,
// calling callback for each match
// calling Callback for each match
type March struct {
	// parameters
	ctx      context.Context
	fdst     fs.Fs
	fsrc     fs.Fs
	dir      string
	callback Marcher
	Ctx           context.Context // context for background goroutines
	Fdst          fs.Fs           // destination Fs
	Fsrc          fs.Fs           // source Fs
	Dir           string          // directory
	NoTraverse    bool            // don't traverse the destination
	SrcIncludeAll bool            // don't include all files in the src
	DstIncludeAll bool            // don't include all files in the destination
	Callback      Marcher         // object to call with results
	// internal state
	srcListDir listDirFn // function to call to list a directory in the src
	dstListDir listDirFn // function to call to list a directory in the dst

@@ -40,17 +43,12 @@ type Marcher interface {
	Match(dst, src fs.DirEntry) (recurse bool)
}

// New sets up a march over fsrc, and fdst calling back callback for each match
func New(ctx context.Context, fdst, fsrc fs.Fs, dir string, callback Marcher) *March {
	m := &March{
		ctx:      ctx,
		fdst:     fdst,
		fsrc:     fsrc,
		dir:      dir,
		callback: callback,
// init sets up a march over opt.Fsrc, and opt.Fdst calling back callback for each match
func (m *March) init() {
	m.srcListDir = m.makeListDir(m.Fsrc, m.SrcIncludeAll)
	if !m.NoTraverse {
		m.dstListDir = m.makeListDir(m.Fdst, m.DstIncludeAll)
	}
	m.srcListDir = m.makeListDir(fsrc, false)
	m.dstListDir = m.makeListDir(fdst, filter.Active.Opt.DeleteExcluded)
	// Now create the matching transform
	// ..normalise the UTF8 first
	m.transforms = append(m.transforms, norm.NFC.String)

@@ -60,10 +58,9 @@ func New(ctx context.Context, fdst, fsrc fs.Fs, dir string, callback Marcher) *M
	// | Yes | No  | No  |
	// | No  | Yes | Yes |
	// | Yes | Yes | Yes |
	if fdst.Features().CaseInsensitive {
	if m.Fdst.Features().CaseInsensitive {
		m.transforms = append(m.transforms, strings.ToLower)
	}
	return m
}

// list a directory into entries, err

@@ -86,7 +83,7 @@ func (m *March) makeListDir(f fs.Fs, includeAll bool) listDirFn {
		mu.Lock()
		defer mu.Unlock()
		if !started {
			dirs, dirsErr = walk.NewDirTree(f, m.dir, includeAll, fs.Config.MaxDepth)
			dirs, dirsErr = walk.NewDirTree(f, m.Dir, includeAll, fs.Config.MaxDepth)
			started = true
		}
		if dirsErr != nil {

@@ -114,6 +111,8 @@ type listDirJob struct {

// Run starts the matching process off
func (m *March) Run() {
	m.init()

	srcDepth := fs.Config.MaxDepth
	if srcDepth < 0 {
		srcDepth = fs.MaxLevel

@@ -133,7 +132,7 @@ func (m *March) Run() {
			defer wg.Done()
			for {
				select {
				case <-m.ctx.Done():
				case <-m.Ctx.Done():
					return
				case job, ok := <-in:
					if !ok {

@@ -147,7 +146,7 @@ func (m *March) Run() {
					// jobs off for traversal in the background
					for _, newJob := range jobs {
						select {
						case <-m.ctx.Done():
						case <-m.Ctx.Done():
							// discard job if finishing
							traversing.Done()
						case in <- newJob:

@@ -164,14 +163,14 @@ func (m *March) Run() {
	// Start the process
	traversing.Add(1)
	in <- listDirJob{
		srcRemote: m.dir,
		srcRemote: m.Dir,
		srcDepth:  srcDepth - 1,
		dstRemote: m.dir,
		dstRemote: m.Dir,
		dstDepth:  dstDepth - 1,
	}
	go func() {
		// when the context is cancelled discard the remaining jobs
		<-m.ctx.Done()
		<-m.Ctx.Done()
		for range in {
			traversing.Done()
		}

@@ -184,7 +183,7 @@ func (m *March) Run() {
// Check to see if the context has been cancelled
func (m *March) aborting() bool {
	select {
	case <-m.ctx.Done():
	case <-m.Ctx.Done():
		return true
	default:
	}

@@ -348,7 +347,7 @@ func (m *March) processJob(job listDirJob) (jobs []listDirJob) {
			srcList, srcListErr = m.srcListDir(job.srcRemote)
		}()
	}
	if !job.noDst {
	if !m.NoTraverse && !job.noDst {
		wg.Add(1)
		go func() {
			defer wg.Done()

@@ -371,13 +370,27 @@ func (m *March) processJob(job listDirJob) (jobs []listDirJob) {
		return nil
	}

	// If NoTraverse is set, then try to find a matching object
	// for each item in the srcList
	if m.NoTraverse {
		for _, src := range srcList {
			if srcObj, ok := src.(fs.Object); ok {
				leaf := path.Base(srcObj.Remote())
				dstObj, err := m.Fdst.NewObject(path.Join(job.dstRemote, leaf))
				if err == nil {
					dstList = append(dstList, dstObj)
				}
			}
		}
	}

	// Work out what to do and do it
	srcOnly, dstOnly, matches := matchListings(srcList, dstList, m.transforms)
	for _, src := range srcOnly {
		if m.aborting() {
			return nil
		}
		recurse := m.callback.SrcOnly(src)
		recurse := m.Callback.SrcOnly(src)
		if recurse && job.srcDepth > 0 {
			jobs = append(jobs, listDirJob{
				srcRemote: src.Remote(),

@@ -391,7 +404,7 @@ func (m *March) processJob(job listDirJob) (jobs []listDirJob) {
		if m.aborting() {
			return nil
		}
		recurse := m.callback.DstOnly(dst)
		recurse := m.Callback.DstOnly(dst)
		if recurse && job.dstDepth > 0 {
			jobs = append(jobs, listDirJob{
				dstRemote: dst.Remote(),

@@ -404,7 +417,7 @@ func (m *March) processJob(job listDirJob) (jobs []listDirJob) {
		if m.aborting() {
			return nil
		}
		recurse := m.callback.Match(match.dst, match.src)
		recurse := m.Callback.Match(match.dst, match.src)
		if recurse && job.srcDepth > 0 && job.dstDepth > 0 {
			jobs = append(jobs, listDirJob{
				srcRemote: match.src.Remote(),
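The NoTraverse branch above is the heart of the feature: instead of listing the destination directory, the march builds dstList by probing the destination once per source file with NewObject. For S source files that trades one full destination listing for S point lookups (typically one round trip each on remote backends), which is a clear win when S is much smaller than the number of files already on the destination, and a clear loss when it is not - exactly the trade-off the --no-traverse docs above describe.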
@@ -12,6 +12,7 @@ func MimeTypeFromName(remote string) (mimeType string) {
	if !strings.ContainsRune(mimeType, '/') {
		mimeType = "application/octet-stream"
	}
	mimeType = strings.Replace(mimeType, "; charset=utf-8", "", -1) // Remove charset
	return mimeType
}
@@ -685,7 +685,13 @@ func CheckFn(fdst, fsrc fs.Fs, check checkFn, oneway bool) error {
	}

	// set up a march over fdst and fsrc
	m := march.New(context.Background(), fdst, fsrc, "", c)
	m := &march.March{
		Ctx:      context.Background(),
		Fdst:     fdst,
		Fsrc:     fsrc,
		Dir:      "",
		Callback: c,
	}
	fs.Infof(fdst, "Waiting for checks to finish")
	m.Run()

@@ -1331,6 +1337,14 @@ func RcatSize(fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modT
	accounting.Stats.Transferring(dstFileName)
	body := ioutil.NopCloser(in) // we let the server close the body
	in := accounting.NewAccountSizeName(body, size, dstFileName) // account the transfer (no buffering)

	if fs.Config.DryRun {
		fs.Logf("stdin", "Not uploading as --dry-run")
		// prevents "broken pipe" errors
		_, err = io.Copy(ioutil.Discard, in)
		return nil, err
	}

	var err error
	defer func() {
		closeErr := in.Close()
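The io.Copy to ioutil.Discard in the dry-run branch is worth a note: the reader is fully drained even though nothing is uploaded, so whatever is writing to rclone's stdin does not hit a "broken pipe" when the data is abandoned early. The same drain-before-abandon pattern in isolation (a sketch, not rclone code):

	// drain a pipe we don't intend to use, so the writer can finish cleanly
	_, err := io.Copy(ioutil.Discard, os.Stdin)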
@@ -116,9 +116,6 @@ func init() {
- srcRemote - a path within that remote eg "file.txt" for the source
- dstFs - a remote name string eg "drive2:" for the destination
- dstRemote - a path within that remote eg "file2.txt" for the destination

This returns
- jobid - ID of async job to query with job/status
`,
	})
}
@@ -211,7 +211,7 @@ func (s *Server) handleOptions(w http.ResponseWriter, r *http.Request, path string) {
func (s *Server) serveRoot(w http.ResponseWriter, r *http.Request) {
	remotes := config.FileSections()
	sort.Strings(remotes)
	directory := serve.NewDirectory("")
	directory := serve.NewDirectory("", s.HTMLTemplate)
	directory.Title = "List of all rclone remotes."
	q := url.Values{}
	for _, remote := range remotes {

@@ -235,7 +235,7 @@ func (s *Server) serveRemote(w http.ResponseWriter, r *http.Request, path string) {
		return
	}
	// Make the entries for display
	directory := serve.NewDirectory(path)
	directory := serve.NewDirectory(path, s.HTMLTemplate)
	for _, entry := range entries {
		_, isDir := entry.(fs.Directory)
		directory.AddEntry(entry.Remote(), isDir)
@@ -23,8 +23,6 @@ func init() {
- srcFs - a remote name string eg "drive:src" for the source
- dstFs - a remote name string eg "drive:dst" for the destination
` + moveHelp + `
This returns
- jobid - ID of async job to query with job/status

See the [` + name + ` command](/commands/rclone_` + name + `/) command for more information on the above.`,
	})
@@ -29,6 +29,7 @@ type syncCopyMove struct {
	// internal state
	ctx           context.Context // internal context for controlling go-routines
	cancel        func()          // cancel the context
	noTraverse    bool            // if set don't traverse the dst
	deletersWg    sync.WaitGroup  // for delete before go routine
	deleteFilesCh chan fs.Object  // channel to receive deletes if delete before
	trackRenames  bool            // set if we should do server side renames

@@ -75,6 +76,7 @@ func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
		dstFilesResult: make(chan error, 1),
		dstEmptyDirs:   make(map[string]fs.DirEntry),
		srcEmptyDirs:   make(map[string]fs.DirEntry),
		noTraverse:     fs.Config.NoTraverse,
		toBeChecked:    newPipe(accounting.Stats.SetCheckQueue, fs.Config.MaxBacklog),
		toBeUploaded:   newPipe(accounting.Stats.SetTransferQueue, fs.Config.MaxBacklog),
		deleteFilesCh:  make(chan fs.Object, fs.Config.Checkers),

@@ -84,6 +86,10 @@ func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
		trackRenamesCh: make(chan fs.Object, fs.Config.Checkers),
	}
	s.ctx, s.cancel = context.WithCancel(context.Background())
	if s.noTraverse && s.deleteMode != fs.DeleteModeOff {
		fs.Errorf(nil, "Ignoring --no-traverse with sync")
		s.noTraverse = false
	}
	if s.trackRenames {
		// Don't track renames for remotes without server-side move support.
		if !operations.CanServerSideMove(fdst) {

@@ -104,6 +110,10 @@ func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
		if s.deleteMode != fs.DeleteModeOff {
			s.deleteMode = fs.DeleteModeAfter
		}
		if s.noTraverse {
			fs.Errorf(nil, "Ignoring --no-traverse with --track-renames")
			s.noTraverse = false
		}
	}
	// Make Fs for --backup-dir if required
	if fs.Config.BackupDir != "" {

@@ -646,7 +656,15 @@ func (s *syncCopyMove) run() error {
	s.startTrackRenames()

	// set up a march over fdst and fsrc
	m := march.New(s.ctx, s.fdst, s.fsrc, s.dir, s)
	m := &march.March{
		Ctx:           s.ctx,
		Fdst:          s.fdst,
		Fsrc:          s.fsrc,
		Dir:           s.dir,
		NoTraverse:    s.noTraverse,
		Callback:      s,
		DstIncludeAll: filter.Active.Opt.DeleteExcluded,
	}
	m.Run()

	s.stopTrackRenames()
@@ -61,6 +61,41 @@ func TestCopy(t *testing.T) {
	fstest.CheckItems(t, r.Fremote, file1)
}

// Now with --no-traverse
func TestCopyNoTraverse(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	fs.Config.NoTraverse = true
	defer func() { fs.Config.NoTraverse = false }()

	file1 := r.WriteFile("sub dir/hello world", "hello world", t1)

	err := CopyDir(r.Fremote, r.Flocal)
	require.NoError(t, err)

	fstest.CheckItems(t, r.Flocal, file1)
	fstest.CheckItems(t, r.Fremote, file1)
}

// Now with --no-traverse
func TestSyncNoTraverse(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	fs.Config.NoTraverse = true
	defer func() { fs.Config.NoTraverse = false }()

	file1 := r.WriteFile("sub dir/hello world", "hello world", t1)

	accounting.Stats.ResetCounters()
	err := Sync(r.Fremote, r.Flocal)
	require.NoError(t, err)

	fstest.CheckItems(t, r.Flocal, file1)
	fstest.CheckItems(t, r.Fremote, file1)
}

// Test copy with depth
func TestCopyWithDepth(t *testing.T) {
	r := fstest.NewRun(t)
@@ -1,4 +1,4 @@
package fs

// Version of rclone
var Version = "v1.45"
var Version = "v1.45-DEV"
@@ -385,7 +385,7 @@ func Run(t *testing.T, opt *Opt) {
	t.Run("FsName", func(t *testing.T) {
		skipIfNotOk(t)
		got := remote.Name()
		want := remoteName
		want := remoteName[:strings.IndexRune(remoteName, ':')+1]
		if isLocalRemote {
			want = "local:"
		}
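The slice expression keeps everything up to and including the first colon, so a remoteName such as "TestS3:bucket/path" (a hypothetical test remote) yields a want of "TestS3:" - the test now tolerates remotes configured with a path suffix.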
@@ -10,7 +10,7 @@ import (
	"path"

	"github.com/pkg/errors"
	"gopkg.in/yaml.v2"
	yaml "gopkg.in/yaml.v2"
)

// Test describes an integration test to run with `go test`
39  go.mod

@@ -2,25 +2,26 @@ module github.com/ncw/rclone

require (
	bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
	cloud.google.com/go v0.31.0 // indirect
	cloud.google.com/go v0.33.1 // indirect
	github.com/Azure/azure-pipeline-go v0.1.8
	github.com/Azure/azure-storage-blob-go v0.0.0-20181023070848-cf01652132cc
	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
	github.com/Unknwon/goconfig v0.0.0-20180308125533-ef1e4c783f8f
	github.com/Unknwon/goconfig v0.0.0-20181105214110-56bd8ab18619
	github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6
	github.com/abbot/go-http-auth v0.4.0
	github.com/aws/aws-sdk-go v1.15.62
	github.com/aws/aws-sdk-go v1.15.81
	github.com/billziss-gh/cgofuse v1.1.0
	github.com/coreos/bbolt v0.0.0-20180318001526-af9db2027c98
	github.com/cpuguy83/go-md2man v1.0.8 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/djherbis/times v1.0.1
	github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible
	github.com/djherbis/times v1.1.0
	github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible
	github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 // indirect
	github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1
	github.com/google/go-querystring v1.0.0 // indirect
	github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
	github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
	github.com/jlaffaye/ftp v0.0.0-20181011150954-fe787349a520
	github.com/jlaffaye/ftp v0.0.0-20181101112434-47f21d10f0ee
	github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect
	github.com/jtolds/gls v4.2.1+incompatible // indirect
	github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect

@@ -28,8 +29,8 @@ require (
	github.com/kr/pretty v0.1.0 // indirect
	github.com/mattn/go-runewidth v0.0.3 // indirect
	github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
	github.com/ncw/swift v1.0.42
	github.com/nsf/termbox-go v0.0.0-20180819125858-b66b20ab708e
	github.com/ncw/swift v1.0.43
	github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed
	github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
	github.com/patrickmn/go-cache v2.1.0+incompatible
	github.com/pengsrc/go-shared v0.2.0 // indirect

@@ -39,24 +40,26 @@ require (
	github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46
	github.com/russross/blackfriday v1.5.2 // indirect
	github.com/sevlyar/go-daemon v0.1.4
	github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 // indirect
	github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd // indirect
	github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c
	github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect
	github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect
	github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c // indirect
	github.com/spf13/cobra v0.0.3
	github.com/spf13/pflag v1.0.3
	github.com/stretchr/testify v1.2.2
	github.com/t3rm1n4l/go-mega v0.0.0-20180817194457-854bf31d998b
	github.com/xanzy/ssh-agent v0.2.0
	github.com/yunify/qingstor-sdk-go v2.2.15+incompatible
	golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a
	golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519
	golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4
	golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
	golang.org/x/sys v0.0.0-20181025063200-d989b31c8746
	golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869
	golang.org/x/net v0.0.0-20181114220301-adae6a3d119a
	golang.org/x/oauth2 v0.0.0-20181120190819-8f65e3013eba
	golang.org/x/sync v0.0.0-20181108010431-42b317875d0f
	golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b
	golang.org/x/text v0.3.0
	golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
	google.golang.org/api v0.0.0-20181025000501-39567f0042a0
	google.golang.org/appengine v1.2.0 // indirect
	golang.org/x/time v0.0.0-20181108054448-85acf8d2951c
	google.golang.org/api v0.0.0-20181120235003-faade3cbb06a
	google.golang.org/appengine v1.3.0 // indirect
	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
	gopkg.in/yaml.v2 v2.2.1
)
go.sum (75 changes)

@@ -1,21 +1,21 @@
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669 h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw=
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.31.0 h1:o9K5MWWt2wk+d9jkGn2DAZ7Q9nUdnFLOpK9eIkDwONQ=
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.33.1 h1:fmJQWZ1w9PGkHR1YL/P7HloDvqlmKQ4Vpb7PC2e+aCk=
cloud.google.com/go v0.33.1/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/Azure/azure-pipeline-go v0.1.8 h1:KmVRa8oFMaargVesEuuEoiLCQ4zCCwQ8QX/xg++KS20=
github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
github.com/Azure/azure-storage-blob-go v0.0.0-20181023070848-cf01652132cc h1:BElWmFfsryQD72OcovStKpkIcd4e9ozSkdsTNQDSHGk=
github.com/Azure/azure-storage-blob-go v0.0.0-20181023070848-cf01652132cc/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Unknwon/goconfig v0.0.0-20180308125533-ef1e4c783f8f h1:2h0zBHX3qNDltgnb2FiMJWYTcqJVDdUeHQU2/5CTc5w=
github.com/Unknwon/goconfig v0.0.0-20180308125533-ef1e4c783f8f/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
github.com/Unknwon/goconfig v0.0.0-20181105214110-56bd8ab18619 h1:6X8iB881g299aNEv6KXrcjL31iLOH7yA6NXoQX+MbDg=
github.com/Unknwon/goconfig v0.0.0-20181105214110-56bd8ab18619/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6 h1:UCQe3W9LxwL2ff5r0PqQfS6Oe5MCKpIH8twfK/dH9mw=
github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
github.com/aws/aws-sdk-go v1.15.62 h1:07s1rZRzXJWlrTvHR8ZZb6DjoQTFnxqraE7y5teA8Og=
github.com/aws/aws-sdk-go v1.15.62/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
github.com/aws/aws-sdk-go v1.15.81 h1:va7uoFaV9uKAtZ6BTmp1u7paoMsizYRRLvRuoC07nQ8=
github.com/aws/aws-sdk-go v1.15.81/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
github.com/billziss-gh/cgofuse v1.1.0 h1:tATn9ZDvuPcOVlvR4tJitGHgAqy1y18+4mKmRfdfjec=
github.com/billziss-gh/cgofuse v1.1.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
github.com/coreos/bbolt v0.0.0-20180318001526-af9db2027c98 h1:0gQU5Ebjs1V8Ow5bBzxZzr0peNjJILSkSb30IfZtshQ=
@@ -24,10 +24,15 @@ github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6v
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/djherbis/times v1.0.1 h1:nVRrVOTFd2r0C7wCQdIDz/fqt8yO0EEzr5f6aXfXiS0=
github.com/djherbis/times v1.0.1/go.mod h1:CGMZlo255K5r4Yw0b9RRfFQpM2y7uOmxg4jm9HsaVf8=
github.com/djherbis/times v1.1.0 h1:NFhBDODme0XNX+/5ETW9qL6v3Ty57psiXIQBrzzg44E=
github.com/djherbis/times v1.1.0/go.mod h1:CGMZlo255K5r4Yw0b9RRfFQpM2y7uOmxg4jm9HsaVf8=
github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible h1:ZFvUIiBbGhDY5zF8yjLoWhUAYs7uDodUpbvTS5oelDE=
github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
github.com/dropbox/dropbox-sdk-go-unofficial v5.0.0+incompatible h1:FQu9Ef2dkC8g2rQmcQmpXXeoRegXHODBfveKKZu6+e8=
github.com/dropbox/dropbox-sdk-go-unofficial v5.0.1-0.20181205034806-56e5f6595305+incompatible h1:4HSS6BiPqvgsn/zrwt6KOYY+mw153zmhvewZIRh1+Ds=
github.com/dropbox/dropbox-sdk-go-unofficial v5.0.1-0.20181205034806-56e5f6595305+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible h1:9jnukMIowLSo3SY7+GTwxmYJv4QC0LxXbo97zHWCyoc=
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 h1:cC0Hbb+18DJ4i6ybqDybvj4wdIDS4vnD0QEci98PgM8=
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9/go.mod h1:GpOj6zuVBG3Inr9qjEnuVTgBlk2lZ1S9DcoFiXWyKss=
github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1 h1:WjgeEHEDLGx56ndxS6FYi6qFjZGajSVHPuEPdpJ60cI=
@@ -36,12 +41,12 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jlaffaye/ftp v0.0.0-20181011150954-fe787349a520 h1:VFHAQNYcjE5grUH5XOA6NPRFURBIcVt4A6R9G4fwH5c=
github.com/jlaffaye/ftp v0.0.0-20181011150954-fe787349a520/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
github.com/jlaffaye/ftp v0.0.0-20181101112434-47f21d10f0ee h1:oCvgfeGIc6GipidJVyG0Hd9R/w6TO8bBYyJg15ZgJkw=
github.com/jlaffaye/ftp v0.0.0-20181101112434-47f21d10f0ee/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
@@ -63,8 +68,10 @@ github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zX
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
github.com/ncw/swift v1.0.42 h1:ztvRb6hs52IHOcaYt73f9lXYLIeIuWgdooRDhdyllGI=
github.com/ncw/swift v1.0.42/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/nsf/termbox-go v0.0.0-20180819125858-b66b20ab708e h1:fvw0uluMptljaRKSU8459cJ4bmi3qUYyMs5kzpic2fY=
github.com/nsf/termbox-go v0.0.0-20180819125858-b66b20ab708e/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
github.com/ncw/swift v1.0.43 h1:TZn2l/bPV0CqG+/G5BFh/ROWnyX7dL2D0URaOjNQRsw=
github.com/ncw/swift v1.0.43/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed h1:bAVGG6B+R5qpSylrrA+BAMrzYkdAoiTaKPVxRB+4cyM=
github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
@@ -83,12 +90,16 @@ github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNue
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sevlyar/go-daemon v0.1.4 h1:Ayxp/9SNHwPBjV+kKbnHl2ch6rhxTu08jfkGkoxgULQ=
github.com/sevlyar/go-daemon v0.1.4/go.mod h1:6dJpPatBT9eUwM5VCw9Bt6CdX9Tk6UWvhW3MebLDRKE=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c h1:fyKiXKO1/I/B6Y2U8T7WdQGWzwehOuGIrljPtt7YTTI=
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a h1:JSvGDIbmil4Ui/dDdFBExb7/cmkNjyX5F97oglmvCDo=
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
@@ -101,25 +112,25 @@ github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible h1:/Z0q3/eSMoPYAuRmhjWtuGSmVVciFC6hfm3yfCKuvz0=
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible/go.mod h1:w6wqLDQ5bBTzxGJ55581UrSwLrsTAsdo9N6yX/8d9RY=
golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a h1:Pg1/+l4/QV6z7N506eGnLiJ/Rl4IJf1FwYQKvP51OjA=
golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869 h1:kkXA53yGe04D0adEYJwEVQjeBppL01Exg+fnMjfUraU=
golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519 h1:x6rhz8Y9CjbgQkccRGmELH6K+LJj7tOoh3XWeC1yaQM=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4 h1:99CA0JJbUX4ozCnLon680Jc9e0T1i8HCaLVJMwtI8Hc=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20181025063200-d989b31c8746 h1:zTiiIq2XH/ldZGPA59ILL7NbDlz/btn3iJvO7H57mY8=
golang.org/x/sys v0.0.0-20181025063200-d989b31c8746/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20181120190819-8f65e3013eba h1:YDkOrzGLLYybtuP6ZgebnO4OWYEYVMFSniazXsxrFN8=
golang.org/x/oauth2 v0.0.0-20181120190819-8f65e3013eba/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b h1:MQE+LT/ABUuuvEZ+YQAMSXindAdUh7slEmAkup74op4=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
google.golang.org/api v0.0.0-20181025000501-39567f0042a0 h1:6a8YRrm+EJS4Y9FGTkLfE2iKneLzRQG5CfJCfJ9AfS0=
google.golang.org/api v0.0.0-20181025000501-39567f0042a0/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.2.0 h1:S0iUepdCWODXRvtE+gcRDd15L+k+k1AiHlMiMjefH24=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
google.golang.org/api v0.0.0-20181120235003-faade3cbb06a h1:yMfgT1baklxtECXVk3UtZBELVXtVhDbK3/7xLFkFypw=
google.golang.org/api v0.0.0-20181120235003-faade3cbb06a/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.3.0 h1:FBSsiFRMz3LBeXIomRnVzrQwSDj4ibvcRexLG0LZGQk=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -153,6 +153,30 @@ type TokenSource struct {
    expiryTimer *time.Timer // signals whenever the token expires
}

// If token has expired then first try re-reading it from the config
// file in case a concurrently runnng rclone has updated it already
func (ts *TokenSource) reReadToken() bool {
    tokenString, err := config.FileGetFresh(ts.name, config.ConfigToken)
    if err != nil {
        fs.Debugf(ts.name, "Failed to read token out of config file: %v", err)
        return false
    }
    newToken := new(oauth2.Token)
    err = json.Unmarshal([]byte(tokenString), newToken)
    if err != nil {
        fs.Debugf(ts.name, "Failed to parse token out of config file: %v", err)
        return false
    }
    if !newToken.Valid() {
        fs.Debugf(ts.name, "Loaded invalid token from config file - ignoring")
        return false
    }
    fs.Debugf(ts.name, "Loaded fresh token from config file")
    ts.token = newToken
    ts.tokenSource = nil // invalidate since we changed the token
    return true
}

// Token returns a token or an error.
// Token must be safe for concurrent use by multiple goroutines.
// The returned Token must not be modified.
@@ -161,17 +185,39 @@ type TokenSource struct {
func (ts *TokenSource) Token() (*oauth2.Token, error) {
    ts.mu.Lock()
    defer ts.mu.Unlock()
    var (
        token   *oauth2.Token
        err     error
        changed = false
    )
    const maxTries = 5

    // Make a new token source if required
    if ts.tokenSource == nil {
        ts.tokenSource = ts.config.TokenSource(ts.ctx, ts.token)
    // Try getting the token a few times
    for i := 1; i <= maxTries; i++ {
        // Try reading the token from the config file in case it has
        // been updated by a concurrent rclone process
        if !ts.token.Valid() {
            if ts.reReadToken() {
                changed = true
            }
        }

        // Make a new token source if required
        if ts.tokenSource == nil {
            ts.tokenSource = ts.config.TokenSource(ts.ctx, ts.token)
        }

        token, err = ts.tokenSource.Token()
        if err == nil {
            break
        }
        fs.Debugf(ts.name, "Token refresh failed try %d/%d: %v", i, maxTries, err)
        time.Sleep(1 * time.Second)
    }

    token, err := ts.tokenSource.Token()
    if err != nil {
        return nil, err
    }
    changed := *token != *ts.token
    changed = changed || (*token != *ts.token)
    ts.token = token
    if changed {
        // Bump on the expiry timer if it is set
@@ -358,7 +404,7 @@ func doConfig(id, name string, m configmap.Mapper, errorHandler func(*http.Reque
    if err != nil {
        return err
    }
    return PutToken(name, m, token, false)
    return PutToken(name, m, token, true)
}
case TitleBarRedirectURL:
    useWebServer = automatic
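The Token() rewrite above combines three tactics: when the cached token has expired, first re-read the config file in case another rclone process already refreshed it; lazily rebuild the underlying token source; and retry the refresh up to maxTries times before failing. As a minimal, self-contained sketch of the same retry pattern against any oauth2.TokenSource (an illustration, not rclone's API; assumes golang.org/x/oauth2 and time are imported):

    // tokenWithRetry asks ts for a token, retrying transient refresh
    // failures with a one second pause, mirroring the loop added above.
    func tokenWithRetry(ts oauth2.TokenSource, maxTries int) (*oauth2.Token, error) {
        var token *oauth2.Token
        var err error
        for i := 1; i <= maxTries; i++ {
            token, err = ts.Token() // refreshes the token if it has expired
            if err == nil {
                return token, nil
            }
            time.Sleep(1 * time.Second) // brief back-off before retrying
        }
        return nil, err
    }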
vendor/cloud.google.com/go/compute/metadata/metadata.go (generated, vendored; 6 changes)

@@ -20,6 +20,7 @@
package metadata // import "cloud.google.com/go/compute/metadata"

import (
    "context"
    "encoding/json"
    "fmt"
    "io/ioutil"
@@ -31,9 +32,6 @@ import (
    "strings"
    "sync"
    "time"

    "golang.org/x/net/context"
    "golang.org/x/net/context/ctxhttp"
)

const (
@@ -143,7 +141,7 @@ func testOnGCE() bool {
    go func() {
        req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
        req.Header.Set("User-Agent", userAgent)
        res, err := ctxhttp.Do(ctx, defaultClient.hc, req)
        res, err := defaultClient.hc.Do(req.WithContext(ctx))
        if err != nil {
            resc <- false
            return
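The metadata change above drops the pre-Go-1.7 ctxhttp helper in favour of the standard library's request-scoped context. The idiom in isolation, as a sketch (the URL is a placeholder):

    // getWithContext issues a GET that is cancelled when ctx is cancelled,
    // using only the standard library (Go 1.7+).
    func getWithContext(ctx context.Context, client *http.Client) (*http.Response, error) {
        req, err := http.NewRequest("GET", "http://example.com/", nil)
        if err != nil {
            return nil, err
        }
        // req.WithContext(ctx) replaces ctxhttp.Do(ctx, client, req).
        return client.Do(req.WithContext(ctx))
    }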
vendor/github.com/Unknwon/goconfig/read.go (generated, vendored; 2 changes)

@@ -72,7 +72,7 @@ func (c *ConfigFile) read(reader io.Reader) (err error) {
            comments += LineBreak + line
        }
        continue
    case line[0] == '[' && line[lineLengh-1] == ']': // New sction.
    case line[0] == '[' && line[lineLengh-1] == ']': // New section.
        // Get section name.
        section = strings.TrimSpace(line[1 : lineLengh-1])
        // Set section comments and empty if it has comments.
vendor/github.com/aws/aws-sdk-go/aws/config.go (generated, vendored; 40 changes)

@@ -45,8 +45,8 @@ type Config struct {
    // that overrides the default generated endpoint for a client. Set this
    // to `""` to use the default generated endpoint.
    //
    // @note You must still provide a `Region` value when specifying an
    //   endpoint for a client.
    // Note: You must still provide a `Region` value when specifying an
    // endpoint for a client.
    Endpoint *string

    // The resolver to use for looking up endpoints for AWS service clients
@@ -65,8 +65,8 @@ type Config struct {
    // noted. A full list of regions is found in the "Regions and Endpoints"
    // document.
    //
    // @see http://docs.aws.amazon.com/general/latest/gr/rande.html
    //   AWS Regions and Endpoints
    // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
    // Regions and Endpoints.
    Region *string

    // Set this to `true` to disable SSL when sending requests. Defaults
@@ -120,9 +120,10 @@ type Config struct {
    // will use virtual hosted bucket addressing when possible
    // (`http://BUCKET.s3.amazonaws.com/KEY`).
    //
    // @note This configuration option is specific to the Amazon S3 service.
    // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
    //   Amazon S3: Virtual Hosting of Buckets
    // Note: This configuration option is specific to the Amazon S3 service.
    //
    // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
    // for Amazon S3: Virtual Hosting of Buckets
    S3ForcePathStyle *bool

    // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
@@ -223,6 +224,21 @@ type Config struct {
    //    Key: aws.String("//foo//bar//moo"),
    // })
    DisableRestProtocolURICleaning *bool

    // EnableEndpointDiscovery will allow for endpoint discovery on operations that
    // have the definition in its model. By default, endpoint discovery is off.
    //
    // Example:
    //    sess := session.Must(session.NewSession(&aws.Config{
    //         EnableEndpointDiscovery: aws.Bool(true),
    //    }))
    //
    //    svc := s3.New(sess)
    //    out, err := svc.GetObject(&s3.GetObjectInput {
    //        Bucket: aws.String("bucketname"),
    //        Key: aws.String("/foo/bar/moo"),
    //    })
    EnableEndpointDiscovery *bool
}

// NewConfig returns a new Config pointer that can be chained with builder
@@ -377,6 +393,12 @@ func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
    return c
}

// WithEndpointDiscovery will set whether or not to use endpoint discovery.
func (c *Config) WithEndpointDiscovery(t bool) *Config {
    c.EnableEndpointDiscovery = &t
    return c
}

// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
    for _, other := range cfgs {
@@ -476,6 +498,10 @@ func mergeInConfig(dst *Config, other *Config) {
    if other.EnforceShouldRetryCheck != nil {
        dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
    }

    if other.EnableEndpointDiscovery != nil {
        dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
    }
}

// Copy will return a shallow copy of the Config object. If any additional
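The new EnableEndpointDiscovery field can be set through the WithEndpointDiscovery builder added above; a sketch of wiring it into a session, following the doc-comment example (bucket and key are placeholders; the aws, session and s3 packages are assumed imported):

    // Enable endpoint discovery for every client built from this session.
    sess := session.Must(session.NewSession(
        aws.NewConfig().WithEndpointDiscovery(true),
    ))
    svc := s3.New(sess)
    out, err := svc.GetObject(&s3.GetObjectInput{
        Bucket: aws.String("bucketname"),
        Key:    aws.String("/foo/bar/moo"),
    })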
vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go (generated, vendored; 4 changes)

@@ -9,9 +9,7 @@ var (
    // providers in the ChainProvider.
    //
    // This has been deprecated. For verbose error messaging set
    // aws.Config.CredentialsChainVerboseErrors to true
    //
    // @readonly
    // aws.Config.CredentialsChainVerboseErrors to true.
    ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
        `no valid providers in chain. Deprecated.
    For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go (generated, vendored; 2 changes)

@@ -64,8 +64,6 @@ import (
    //         Credentials: credentials.AnonymousCredentials,
    //     })))
    //     // Access public S3 buckets.
    //
    // @readonly
    var AnonymousCredentials = NewStaticCredentials("", "", "")

    // A Value is the AWS credentials value for individual credential fields.
vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go (generated, vendored; 4 changes)

@@ -12,14 +12,10 @@ const EnvProviderName = "EnvProvider"
var (
    // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
    // found in the process's environment.
    //
    // @readonly
    ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)

    // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
    // can't be found in the process's environment.
    //
    // @readonly
    ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)
vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go (generated, vendored; 2 changes)

@@ -9,8 +9,6 @@ const StaticProviderName = "StaticProvider"

var (
    // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
    //
    // @readonly
    ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
)
vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go (generated, vendored; 9 changes)

@@ -95,7 +95,12 @@ func custAddS3DualStack(p *partition) {
        return
    }

    s, ok := p.Services["s3"]
    custAddDualstack(p, "s3")
    custAddDualstack(p, "s3-control")
}

func custAddDualstack(p *partition, svcName string) {
    s, ok := p.Services[svcName]
    if !ok {
        return
    }
@@ -103,7 +108,7 @@ func custAddS3DualStack(p *partition) {
    s.Defaults.HasDualStack = boxedTrue
    s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"

    p.Services["s3"] = s
    p.Services[svcName] = s
}

func custAddEC2Metadata(p *partition) {
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (generated, vendored; 380 changes)

@@ -40,6 +40,7 @@ const (

// AWS GovCloud (US) partition's regions.
const (
    UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East).
    UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
)

@@ -54,12 +55,14 @@ const (
    ApigatewayServiceID = "apigateway" // Apigateway.
    ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
    Appstream2ServiceID = "appstream2" // Appstream2.
    AppsyncServiceID = "appsync" // Appsync.
    AthenaServiceID = "athena" // Athena.
    AutoscalingServiceID = "autoscaling" // Autoscaling.
    AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
    BatchServiceID = "batch" // Batch.
    BudgetsServiceID = "budgets" // Budgets.
    CeServiceID = "ce" // Ce.
    ChimeServiceID = "chime" // Chime.
    Cloud9ServiceID = "cloud9" // Cloud9.
    ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
    CloudformationServiceID = "cloudformation" // Cloudformation.
@@ -148,6 +151,7 @@ const (
    RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
    RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
    S3ServiceID = "s3" // S3.
    S3ControlServiceID = "s3-control" // S3Control.
    SdbServiceID = "sdb" // Sdb.
    SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
    ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
@@ -337,11 +341,16 @@ var awsPartition = partition{
            Endpoints: endpoints{
                "ap-northeast-1": endpoint{},
                "ap-northeast-2": endpoint{},
                "ap-south-1": endpoint{},
                "ap-southeast-1": endpoint{},
                "ap-southeast-2": endpoint{},
                "ca-central-1": endpoint{},
                "eu-central-1": endpoint{},
                "eu-west-1": endpoint{},
                "eu-west-2": endpoint{},
                "us-east-1": endpoint{},
                "us-east-2": endpoint{},
                "us-west-1": endpoint{},
                "us-west-2": endpoint{},
            },
        },
@@ -405,6 +414,21 @@ var awsPartition = partition{
                "us-west-2": endpoint{},
            },
        },
        "appsync": service{

            Endpoints: endpoints{
                "ap-northeast-1": endpoint{},
                "ap-northeast-2": endpoint{},
                "ap-south-1": endpoint{},
                "ap-southeast-1": endpoint{},
                "ap-southeast-2": endpoint{},
                "eu-central-1": endpoint{},
                "eu-west-1": endpoint{},
                "us-east-1": endpoint{},
                "us-east-2": endpoint{},
                "us-west-2": endpoint{},
            },
        },
        "athena": service{

            Endpoints: endpoints{
@@ -504,6 +528,23 @@ var awsPartition = partition{
                },
            },
        },
        "chime": service{
            PartitionEndpoint: "aws-global",
            IsRegionalized: boxedFalse,
            Defaults: endpoint{
                SSLCommonName: "service.chime.aws.amazon.com",
                Protocols: []string{"https"},
            },
            Endpoints: endpoints{
                "aws-global": endpoint{
                    Hostname: "service.chime.aws.amazon.com",
                    Protocols: []string{"https"},
                    CredentialScope: credentialScope{
                        Region: "us-east-1",
                    },
                },
            },
        },
        "cloud9": service{

            Endpoints: endpoints{
@@ -584,6 +625,7 @@ var awsPartition = partition{
            },
            Endpoints: endpoints{
                "ap-northeast-1": endpoint{},
                "ap-northeast-2": endpoint{},
                "ap-south-1": endpoint{},
                "ap-southeast-1": endpoint{},
                "ap-southeast-2": endpoint{},
@@ -591,6 +633,7 @@ var awsPartition = partition{
                "eu-central-1": endpoint{},
                "eu-west-1": endpoint{},
                "eu-west-2": endpoint{},
                "eu-west-3": endpoint{},
                "us-east-1": endpoint{},
                "us-east-2": endpoint{},
                "us-west-1": endpoint{},
@@ -689,11 +732,17 @@ var awsPartition = partition{
                "eu-west-1": endpoint{},
                "eu-west-2": endpoint{},
                "eu-west-3": endpoint{},
                "sa-east-1": endpoint{},
                "us-east-1": endpoint{},
                "us-east-2": endpoint{},
                "us-west-1": endpoint{},
                "us-west-2": endpoint{},
                "fips": endpoint{
                    Hostname: "codecommit-fips.ca-central-1.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "ca-central-1",
                    },
                },
                "sa-east-1": endpoint{},
                "us-east-1": endpoint{},
                "us-east-2": endpoint{},
                "us-west-1": endpoint{},
                "us-west-2": endpoint{},
            },
        },
        "codedeploy": service{
@@ -711,9 +760,33 @@ var awsPartition = partition{
                "eu-west-3": endpoint{},
                "sa-east-1": endpoint{},
                "us-east-1": endpoint{},
                "us-east-2": endpoint{},
                "us-west-1": endpoint{},
                "us-west-2": endpoint{},
                "us-east-1-fips": endpoint{
                    Hostname: "codedeploy-fips.us-east-1.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-east-1",
                    },
                },
                "us-east-2": endpoint{},
                "us-east-2-fips": endpoint{
                    Hostname: "codedeploy-fips.us-east-2.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-east-2",
                    },
                },
                "us-west-1": endpoint{},
                "us-west-1-fips": endpoint{
                    Hostname: "codedeploy-fips.us-west-1.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-west-1",
                    },
                },
                "us-west-2": endpoint{},
                "us-west-2-fips": endpoint{
                    Hostname: "codedeploy-fips.us-west-2.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-west-2",
                    },
                },
            },
        },
        "codepipeline": service{
@@ -1284,6 +1357,7 @@ var awsPartition = partition{
                "eu-west-1": endpoint{},
                "eu-west-2": endpoint{},
                "eu-west-3": endpoint{},
                "sa-east-1": endpoint{},
                "us-east-1": endpoint{},
                "us-east-2": endpoint{},
                "us-west-1": endpoint{},
@@ -1417,6 +1491,7 @@ var awsPartition = partition{

            Endpoints: endpoints{
                "ap-northeast-1": endpoint{},
                "eu-central-1": endpoint{},
                "eu-west-1": endpoint{},
                "us-east-1": endpoint{},
                "us-east-2": endpoint{},
@@ -1592,6 +1667,7 @@ var awsPartition = partition{
            Endpoints: endpoints{
                "ap-northeast-1": endpoint{},
                "ap-northeast-2": endpoint{},
                "ap-south-1": endpoint{},
                "ap-southeast-1": endpoint{},
                "ap-southeast-2": endpoint{},
                "eu-central-1": endpoint{},
@@ -1698,6 +1774,12 @@ var awsPartition = partition{
        "neptune": service{

            Endpoints: endpoints{
                "eu-central-1": endpoint{
                    Hostname: "rds.eu-central-1.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "eu-central-1",
                    },
                },
                "eu-west-1": endpoint{
                    Hostname: "rds.eu-west-1.amazonaws.com",
                    CredentialScope: credentialScope{
@@ -1784,6 +1866,7 @@ var awsPartition = partition{
                },
            },
            Endpoints: endpoints{
                "eu-west-1": endpoint{},
                "us-east-1": endpoint{},
            },
        },
@@ -1916,11 +1999,16 @@ var awsPartition = partition{
            Endpoints: endpoints{
                "ap-northeast-1": endpoint{},
                "ap-northeast-2": endpoint{},
                "ap-south-1": endpoint{},
                "ap-southeast-1": endpoint{},
                "ap-southeast-2": endpoint{},
                "ca-central-1": endpoint{},
                "eu-central-1": endpoint{},
                "eu-west-1": endpoint{},
                "eu-west-2": endpoint{},
                "us-east-1": endpoint{},
                "us-east-2": endpoint{},
                "us-west-1": endpoint{},
                "us-west-2": endpoint{},
            },
        },
@@ -1983,6 +2071,150 @@ var awsPartition = partition{
                },
            },
        },
        "s3-control": service{
            Defaults: endpoint{
                Protocols: []string{"https"},
                SignatureVersions: []string{"s3v4"},

                HasDualStack: boxedTrue,
                DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
            },
            Endpoints: endpoints{
                "ap-northeast-1": endpoint{
                    Hostname: "s3-control.ap-northeast-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "ap-northeast-1",
                    },
                },
                "ap-northeast-2": endpoint{
                    Hostname: "s3-control.ap-northeast-2.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "ap-northeast-2",
                    },
                },
                "ap-south-1": endpoint{
                    Hostname: "s3-control.ap-south-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "ap-south-1",
                    },
                },
                "ap-southeast-1": endpoint{
                    Hostname: "s3-control.ap-southeast-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "ap-southeast-1",
                    },
                },
                "ap-southeast-2": endpoint{
                    Hostname: "s3-control.ap-southeast-2.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "ap-southeast-2",
                    },
                },
                "ca-central-1": endpoint{
                    Hostname: "s3-control.ca-central-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "ca-central-1",
                    },
                },
                "eu-central-1": endpoint{
                    Hostname: "s3-control.eu-central-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "eu-central-1",
                    },
                },
                "eu-west-1": endpoint{
                    Hostname: "s3-control.eu-west-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "eu-west-1",
                    },
                },
                "eu-west-2": endpoint{
                    Hostname: "s3-control.eu-west-2.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "eu-west-2",
                    },
                },
                "eu-west-3": endpoint{
                    Hostname: "s3-control.eu-west-3.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "eu-west-3",
                    },
                },
                "sa-east-1": endpoint{
                    Hostname: "s3-control.sa-east-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "sa-east-1",
                    },
                },
                "us-east-1": endpoint{
                    Hostname: "s3-control.us-east-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "us-east-1",
                    },
                },
                "us-east-1-fips": endpoint{
                    Hostname: "s3-control-fips.us-east-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "us-east-1",
                    },
                },
                "us-east-2": endpoint{
                    Hostname: "s3-control.us-east-2.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "us-east-2",
                    },
                },
                "us-east-2-fips": endpoint{
                    Hostname: "s3-control-fips.us-east-2.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "us-east-2",
                    },
                },
                "us-west-1": endpoint{
                    Hostname: "s3-control.us-west-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "us-west-1",
                    },
                },
                "us-west-1-fips": endpoint{
                    Hostname: "s3-control-fips.us-west-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "us-west-1",
                    },
                },
                "us-west-2": endpoint{
                    Hostname: "s3-control.us-west-2.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "us-west-2",
                    },
                },
                "us-west-2-fips": endpoint{
                    Hostname: "s3-control-fips.us-west-2.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "us-west-2",
                    },
                },
            },
        },
        "sdb": service{
            Defaults: endpoint{
                Protocols: []string{"http", "https"},
@@ -2574,6 +2806,7 @@ var awsPartition = partition{
                "eu-central-1": endpoint{},
                "eu-west-1": endpoint{},
                "eu-west-2": endpoint{},
                "eu-west-3": endpoint{},
                "sa-east-1": endpoint{},
                "us-east-1": endpoint{},
                "us-east-2": endpoint{},
@@ -2690,6 +2923,13 @@ var awscnPartition = partition{
                "cn-northwest-1": endpoint{},
            },
        },
        "dms": service{

            Endpoints: endpoints{
                "cn-north-1": endpoint{},
                "cn-northwest-1": endpoint{},
            },
        },
        "ds": service{

            Endpoints: endpoints{
@@ -2775,6 +3015,7 @@ var awscnPartition = partition{
        "es": service{

            Endpoints: endpoints{
                "cn-north-1": endpoint{},
                "cn-northwest-1": endpoint{},
            },
        },
@@ -2871,6 +3112,28 @@ var awscnPartition = partition{
                "cn-northwest-1": endpoint{},
            },
        },
        "s3-control": service{
            Defaults: endpoint{
                Protocols: []string{"https"},
                SignatureVersions: []string{"s3v4"},
            },
            Endpoints: endpoints{
                "cn-north-1": endpoint{
                    Hostname: "s3-control.cn-north-1.amazonaws.com.cn",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "cn-north-1",
                    },
                },
                "cn-northwest-1": endpoint{
                    Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "cn-northwest-1",
                    },
                },
            },
        },
        "sms": service{

            Endpoints: endpoints{
@@ -2973,6 +3236,9 @@ var awsusgovPartition = partition{
        SignatureVersions: []string{"v4"},
    },
    Regions: regions{
        "us-gov-east-1": region{
            Description: "AWS GovCloud (US-East)",
        },
        "us-gov-west-1": region{
            Description: "AWS GovCloud (US)",
        },
@@ -2981,6 +3247,7 @@ var awsusgovPartition = partition{
        "acm": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
@@ -2993,26 +3260,36 @@ var awsusgovPartition = partition{
        "apigateway": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "application-autoscaling": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "autoscaling": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{
                    Protocols: []string{"http", "https"},
                },
            },
        },
        "clouddirectory": service{

            Endpoints: endpoints{
                "us-gov-west-1": endpoint{},
            },
        },
        "cloudformation": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
@@ -3035,36 +3312,48 @@ var awsusgovPartition = partition{
        "cloudtrail": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "codedeploy": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
                "us-gov-west-1-fips": endpoint{
                    Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-gov-west-1",
                    },
                },
            },
        },
        "config": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "directconnect": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "dms": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "dynamodb": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
                "us-gov-west-1-fips": endpoint{
                    Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
@@ -3077,6 +3366,7 @@ var awsusgovPartition = partition{
        "ec2": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
@@ -3094,12 +3384,14 @@ var awsusgovPartition = partition{
        "ecr": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "ecs": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
@@ -3112,18 +3404,21 @@ var awsusgovPartition = partition{
                        Region: "us-gov-west-1",
                    },
                },
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "elasticbeanstalk": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "elasticloadbalancing": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{
                    Protocols: []string{"http", "https"},
                },
@@ -3132,6 +3427,7 @@ var awsusgovPartition = partition{
        "elasticmapreduce": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{
                    Protocols: []string{"https"},
                },
@@ -3146,17 +3442,28 @@ var awsusgovPartition = partition{
        "events": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "glacier": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{
                    Protocols: []string{"http", "https"},
                },
            },
        },
        "guardduty": service{
            IsRegionalized: boxedTrue,
            Defaults: endpoint{
                Protocols: []string{"https"},
            },
            Endpoints: endpoints{
                "us-gov-west-1": endpoint{},
            },
        },
        "iam": service{
            PartitionEndpoint: "aws-us-gov-global",
            IsRegionalized: boxedFalse,
@@ -3173,6 +3480,7 @@ var awsusgovPartition = partition{
        "inspector": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
@@ -3189,24 +3497,28 @@ var awsusgovPartition = partition{
        "kinesis": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "kms": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "lambda": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "logs": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
@@ -3223,6 +3535,7 @@ var awsusgovPartition = partition{
        "monitoring": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
@@ -3235,12 +3548,14 @@ var awsusgovPartition = partition{
        "rds": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "redshift": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
@@ -3267,15 +3582,56 @@ var awsusgovPartition = partition{
                        Region: "us-gov-west-1",
                    },
                },
                "us-gov-east-1": endpoint{
                    Hostname: "s3.us-gov-east-1.amazonaws.com",
                    Protocols: []string{"http", "https"},
                },
                "us-gov-west-1": endpoint{
                    Hostname: "s3.us-gov-west-1.amazonaws.com",
                    Protocols: []string{"http", "https"},
                },
            },
        },
        "s3-control": service{
            Defaults: endpoint{
                Protocols: []string{"https"},
                SignatureVersions: []string{"s3v4"},
            },
            Endpoints: endpoints{
                "us-gov-east-1": endpoint{
                    Hostname: "s3-control.us-gov-east-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "us-gov-east-1",
                    },
                },
                "us-gov-east-1-fips": endpoint{
                    Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "us-gov-east-1",
                    },
                },
                "us-gov-west-1": endpoint{
                    Hostname: "s3-control.us-gov-west-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "us-gov-west-1",
                    },
                },
                "us-gov-west-1-fips": endpoint{
                    Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com",
                    SignatureVersions: []string{"s3v4"},
                    CredentialScope: credentialScope{
                        Region: "us-gov-west-1",
                    },
                },
            },
        },
        "sms": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
@@ -3288,6 +3644,7 @@ var awsusgovPartition = partition{
        "sns": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{
                    Protocols: []string{"http", "https"},
                },
@@ -3296,6 +3653,7 @@ var awsusgovPartition = partition{
        "sqs": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{
                    SSLCommonName: "{region}.queue.{dnsSuffix}",
                    Protocols: []string{"http", "https"},
@@ -3305,12 +3663,14 @@ var awsusgovPartition = partition{
        "ssm": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "states": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
@@ -3327,6 +3687,7 @@ var awsusgovPartition = partition{
            },
        },
            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
                "us-gov-west-1-fips": endpoint{
                    Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
@@ -3339,18 +3700,21 @@ var awsusgovPartition = partition{
        "sts": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "swf": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
        "tagging": service{

            Endpoints: endpoints{
                "us-gov-east-1": endpoint{},
                "us-gov-west-1": endpoint{},
            },
        },
vendor/github.com/aws/aws-sdk-go/aws/errors.go (generated, vendored; 4 changes)

@@ -5,13 +5,9 @@ import "github.com/aws/aws-sdk-go/aws/awserr"
var (
    // ErrMissingRegion is an error that is returned if region configuration is
    // not found.
    //
    // @readonly
    ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)

    // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
    // resolved for a service.
    //
    // @readonly
    ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
)
vendor/github.com/aws/aws-sdk-go/aws/request/request.go (generated, vendored; 8 changes)

@@ -266,7 +266,9 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) {
}

// Presign returns the request's signed URL. Error will be returned
// if the signing fails.
// if the signing fails. The expire parameter is only used for presigned Amazon
// S3 API requests. All other AWS services will use a fixed expriation
// time of 15 minutes.
//
// It is invalid to create a presigned URL with a expire duration 0 or less. An
// error is returned if expire duration is 0 or less.
@@ -283,7 +285,9 @@ func (r *Request) Presign(expire time.Duration) (string, error) {
}

// PresignRequest behaves just like presign, with the addition of returning a
// set of headers that were signed.
// set of headers that were signed. The expire parameter is only used for
// presigned Amazon S3 API requests. All other AWS services will use a fixed
// expriation time of 15 minutes.
//
// It is invalid to create a presigned URL with a expire duration 0 or less. An
// error is returned if expire duration is 0 or less.
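The clarified Presign documentation is easiest to read alongside a usage sketch: presigning matters mostly for S3, where the expire argument is honoured (bucket and key are placeholders; svc is assumed to be an s3 client built from a session):

    req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
        Bucket: aws.String("bucketname"),
        Key:    aws.String("key"),
    })
    // Ask for a URL valid for 15 minutes. Per the comment above, non-S3
    // services pin the expiry to 15 minutes whatever value is passed.
    urlStr, err := req.Presign(15 * time.Minute)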
vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go (generated, vendored; 17 changes)

@@ -4,6 +4,7 @@ import (
    "os"
    "strconv"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/defaults"
)
@@ -101,6 +102,12 @@ type envConfig struct {
    CSMEnabled  bool
    CSMPort     string
    CSMClientID string

    enableEndpointDiscovery string
    // Enables endpoint discovery via environment variables.
    //
    //    AWS_ENABLE_ENDPOINT_DISCOVERY=true
    EnableEndpointDiscovery *bool
}

var (
@@ -125,6 +132,10 @@ var (
        "AWS_SESSION_TOKEN",
    }

    enableEndpointDiscoveryEnvKey = []string{
        "AWS_ENABLE_ENDPOINT_DISCOVERY",
    }

    regionEnvKeys = []string{
        "AWS_REGION",
        "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
@@ -194,6 +205,12 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
    setFromEnvVal(&cfg.Region, regionKeys)
    setFromEnvVal(&cfg.Profile, profileKeys)

    // endpoint discovery is in reference to it being enabled.
    setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey)
    if len(cfg.enableEndpointDiscovery) > 0 {
        cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false")
    }

    setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
    setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
vendor/github.com/aws/aws-sdk-go/aws/session/session.go (generated, vendored; 8 changes)

@@ -452,6 +452,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
        }
    }

    if cfg.EnableEndpointDiscovery == nil {
        if envCfg.EnableEndpointDiscovery != nil {
            cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
        } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
            cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
        }
    }

    // Configure credentials if not already set
    if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go (generated, vendored; 15 changes)

@@ -26,6 +26,9 @@ const (
    // Additional Config fields
    regionKey = `region`

    // endpoint discovery group
    enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional

    // DefaultSharedConfigProfile is the default profile to be used when
    // loading configuration from the config files if another profile name
    // is not provided.
@@ -62,6 +65,12 @@ type sharedConfig struct {
    //
    //    region
    Region string

    // EnableEndpointDiscovery can be enabled in the shared config by setting
    // endpoint_discovery_enabled to true
    //
    //    endpoint_discovery_enabled = true
    EnableEndpointDiscovery *bool
}

type sharedConfigFile struct {
@@ -219,6 +228,12 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e
        cfg.Region = v
    }

    // Endpoint discovery
    if section.Has(enableEndpointDiscoveryKey) {
        v := section.Bool(enableEndpointDiscoveryKey)
        cfg.EnableEndpointDiscovery = &v
    }

    return nil
}
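Taken together, the env_config.go, session.go and shared_config.go changes let endpoint discovery be enabled without touching code, via either AWS_ENABLE_ENDPOINT_DISCOVERY (any non-empty value other than "false" counts as enabled) or endpoint_discovery_enabled in the shared config file. A sketch of both routes (the profile name is a placeholder; the os and session packages are assumed imported):

    // ~/.aws/config
    //
    //   [profile discovery]
    //   region = us-east-1
    //   endpoint_discovery_enabled = true

    // Or via the environment:
    os.Setenv("AWS_ENABLE_ENDPOINT_DISCOVERY", "true")

    sess := session.Must(session.NewSessionWithOptions(session.Options{
        Profile:           "discovery",
        SharedConfigState: session.SharedConfigEnable,
    }))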
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go (generated, vendored; 1 change)

@@ -134,6 +134,7 @@ var requiredSignedHeaders = rules{
        "X-Amz-Server-Side-Encryption-Customer-Key":     struct{}{},
        "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
        "X-Amz-Storage-Class":                           struct{}{},
        "X-Amz-Tagging":                                 struct{}{},
        "X-Amz-Website-Redirect-Location":               struct{}{},
        "X-Amz-Content-Sha256":                          struct{}{},
    },
vendor/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored; 2 changes)

@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.15.62"
const SDKVersion = "1.15.81"
vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go (generated, vendored; 4 changes)

@@ -12,10 +12,6 @@ func isComment(b []rune) bool {
        return true
    case '#':
        return true
    case '/':
        if len(b) > 1 {
            return b[1] == '/'
        }
    }

    return false
Some files were not shown because too many files have changed in this diff.