Mirror of https://github.com/rclone/rclone.git, synced 2026-01-21 20:03:22 +00:00

Compare commits: v1.46...adb-remote (27 commits)
Commits:

c3e1a0f368
2683939a4b
ed88ae878e
2ba5c35e88
240c15883f
38864adc9c
5991315990
73f0a67d98
ffe067d6e7
b5f563fb0f
9310c7f3e2
1c1a8ef24b
2cfbc2852d
b167d30420
ec59760d9c
076d3da825
c3eecbe933
d8e5b19ed4
43bc381e90
fb5ee22112
35327dad6f
ef5e1909a0
bca5d8009e
334f19c974
42a5bf1d9f
71d1890316
d29c545627
@@ -20,6 +20,9 @@ linters:
|
||||
disable-all: true
|
||||
|
||||
issues:
|
||||
# Enable some lints excluded by default
|
||||
exclude-use-default: false
|
||||
|
||||
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
||||
max-per-linter: 0
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ go:
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- 1.12rc1
|
||||
- tip
|
||||
go_import_path: github.com/ncw/rclone
|
||||
before_install:
|
||||
@@ -43,7 +44,7 @@ matrix:
|
||||
- go: tip
|
||||
include:
|
||||
- os: osx
|
||||
go: 1.11.x
|
||||
go: 1.12rc1
|
||||
env: GOTAGS=""
|
||||
cache:
|
||||
directories:
|
||||
@@ -55,5 +56,5 @@ deploy:
|
||||
on:
|
||||
repo: ncw/rclone
|
||||
all_branches: true
|
||||
go: 1.11.x
|
||||
go: 1.12rc1
|
||||
condition: $TRAVIS_PULL_REQUEST == false
|
||||
|
||||
Makefile (2 lines changed)
@@ -11,7 +11,7 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
|
||||
BRANCH_PATH :=
|
||||
endif
|
||||
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
|
||||
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
|
||||
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
|
||||
ifneq ($(TAG),$(LAST_TAG))
|
||||
TAG := $(TAG)-beta
|
||||
endif
|
||||
|
||||
backend/adb/adb.go (new file, 821 lines)
@@ -0,0 +1,821 @@
|
||||
package adb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
adb "github.com/thinkhy/go-adb"
|
||||
"github.com/thinkhy/go-adb/wire"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "adb",
|
||||
Description: "Android Debug Bridge",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "serial",
|
||||
Help: "The device serial to use. Leave empty for auto selection.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "host",
|
||||
Default: "localhost",
|
||||
Help: "The ADB server host.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "port",
|
||||
Default: 5037,
|
||||
Help: "The ADB server port.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "executable",
|
||||
Help: "The ADB executable path.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_links",
|
||||
Help: "Follow symlinks and copy the pointed to item.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Serial string
|
||||
Host string
|
||||
Port uint16
|
||||
Executable string
|
||||
FollowSymlinks bool `config:"copy_links"`
|
||||
}
|
||||
|
||||
// Fs represents an ADB device
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
client *adb.Adb
|
||||
device *execDevice
|
||||
statFunc statFunc
|
||||
statFuncMu sync.Mutex
|
||||
touchFunc touchFunc
|
||||
touchFuncMu sync.Mutex
|
||||
}
|
||||
|
||||
// Object describes an ADB file
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("ADB root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if root == "" {
|
||||
root = "/"
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
statFunc: (*Object).statTry,
|
||||
touchFunc: (*Object).touchTry,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
|
||||
f.client, err = adb.NewWithConfig(adb.ServerConfig{
|
||||
Host: opt.Host,
|
||||
Port: int(opt.Port),
|
||||
PathToAdb: opt.Executable,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not configure ADB server")
|
||||
}
|
||||
err = f.client.StartServer()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not start ADB server")
|
||||
}
|
||||
|
||||
serverVersion, err := f.client.ServerVersion()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not get ADB server version")
|
||||
}
|
||||
fs.Debugf(f, "ADB server version: 0x%X", serverVersion)
|
||||
|
||||
serials, err := f.client.ListDeviceSerials()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not get ADB devices")
|
||||
}
|
||||
descriptor := adb.AnyDevice()
|
||||
if opt.Serial != "" {
|
||||
descriptor = adb.DeviceWithSerial(opt.Serial)
|
||||
}
|
||||
if len(serials) > 1 && opt.Serial == "" {
|
||||
return nil, errors.New("Multiple ADB devices found. Use the serial config to select a specific device")
|
||||
}
|
||||
f.device = &execDevice{f.client.Device(descriptor)}
|
||||
|
||||
// follow symlinks for the root path
|
||||
entry, err := f.newEntryFollowSymlinks("")
|
||||
switch err {
|
||||
case nil:
|
||||
case fs.ErrorObjectNotFound:
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
switch entry.(type) {
|
||||
case fs.Object:
|
||||
f.root = path.Dir(f.root)
|
||||
return f, fs.ErrorIsFile
|
||||
case nil:
|
||||
return f, nil
|
||||
case fs.Directory:
|
||||
return f, nil
|
||||
default:
|
||||
return nil, errors.Errorf("Invalid root entry type %t", entry)
|
||||
}
|
||||
}
|
||||
|
||||
// Precision of the object storage system
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return 1 * time.Second
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.None)
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
p := path.Join(f.root, dir)
|
||||
dirEntries, err := f.device.ListDirEntries(p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "ListDirEntries")
|
||||
}
|
||||
|
||||
defer fs.CheckClose(dirEntries, &err)
|
||||
|
||||
found := false
|
||||
for dirEntries.Next() {
|
||||
found = true
|
||||
dirEntry := dirEntries.Entry()
|
||||
switch dirEntry.Name {
|
||||
case ".", "..":
|
||||
continue
|
||||
}
|
||||
fsEntry, err := f.entryForDirEntry(path.Join(dir, dirEntry.Name), dirEntry, f.opt.FollowSymlinks)
|
||||
if err != nil {
|
||||
fs.Errorf(p, "Listing error: %q: %v", dirEntry.Name, err)
|
||||
return nil, err
|
||||
} else if fsEntry != nil {
|
||||
entries = append(entries, fsEntry)
|
||||
} else {
|
||||
fs.Debugf(f, "Skipping DirEntry %#v", dirEntry)
|
||||
}
|
||||
}
|
||||
err = dirEntries.Err()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "ListDirEntries")
|
||||
}
|
||||
if !found {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) entryForDirEntry(remote string, e *adb.DirEntry, followSymlinks bool) (fs.DirEntry, error) {
|
||||
o := f.newObjectWithInfo(remote, e)
|
||||
// Follow symlinks if required
|
||||
if followSymlinks && (e.Mode&os.ModeSymlink) != 0 {
|
||||
err := f.statFunc(&o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if o.mode.IsDir() {
|
||||
return fs.NewDir(remote, o.modTime), nil
|
||||
}
|
||||
return &o, nil
|
||||
}
|
||||
|
||||
func (f *Fs) newEntry(remote string) (fs.DirEntry, error) {
|
||||
return f.newEntryWithFollow(remote, f.opt.FollowSymlinks)
|
||||
}
|
||||
func (f *Fs) newEntryFollowSymlinks(remote string) (fs.DirEntry, error) {
|
||||
return f.newEntryWithFollow(remote, true)
|
||||
}
|
||||
func (f *Fs) newEntryWithFollow(remote string, followSymlinks bool) (fs.DirEntry, error) {
|
||||
entry, err := f.device.Stat(path.Join(f.root, remote))
|
||||
if err != nil {
|
||||
if adb.HasErrCode(err, adb.FileNoExistError) {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return nil, errors.Wrapf(err, "Stat failed")
|
||||
}
|
||||
return f.entryForDirEntry(remote, entry, followSymlinks)
|
||||
}
|
||||
|
||||
func (f *Fs) newObjectWithInfo(remote string, e *adb.DirEntry) Object {
|
||||
return Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: int64(e.Size),
|
||||
mode: e.Mode,
|
||||
modTime: e.ModifiedAt,
|
||||
}
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
entry, err := f.newEntry(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
obj, ok := entry.(fs.Object)
|
||||
if !ok {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
|
||||
// return an error or upload it properly (rather than e.g. calling panic).
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
remote := src.Remote()
|
||||
// Temporary Object under construction - info filled in by Update()
|
||||
o := f.newObject(remote)
|
||||
err := o.Update(in, src, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// newObject makes a half completed Object
|
||||
func (f *Fs) newObject(remote string) *Object {
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
|
||||
//
|
||||
// Shouldn't return an error if it already exists
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
p := path.Join(f.root, dir)
|
||||
output, code, err := f.device.execCommandWithExitCode("mkdir -p", p)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case adb.ShellExitError:
|
||||
entry, _ := f.newEntry(p)
|
||||
if _, ok := entry.(fs.Directory); ok {
|
||||
return nil
|
||||
}
|
||||
return errors.Errorf("mkdir %q failed with %d: %q", dir, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "mkdir")
|
||||
}
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
p := path.Join(f.root, dir)
|
||||
output, code, err := f.device.execCommandWithExitCode("rmdir", p)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("rmdir %q failed with %d: %q", dir, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "rmdir")
|
||||
}
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// ModTime returns the modification date of the file
|
||||
// It should return a best guess if one isn't available
|
||||
func (o *Object) ModTime() time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Storable says whether this object can be stored
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// SetModTime sets the metadata on the object to set the modification date
|
||||
func (o *Object) SetModTime(t time.Time) error {
|
||||
return o.fs.touchFunc(o, t)
|
||||
}
|
||||
|
||||
func (o *Object) stat() error {
|
||||
return o.statStatArg(statArgC, path.Join(o.fs.root, o.remote))
|
||||
}
|
||||
|
||||
func (o *Object) setMetadata(entry *adb.DirEntry) {
|
||||
// Don't overwrite the values if we don't need to
|
||||
// this avoids upsetting the race detector
|
||||
if o.size != int64(entry.Size) {
|
||||
o.size = int64(entry.Size)
|
||||
}
|
||||
if !o.modTime.Equal(entry.ModifiedAt) {
|
||||
o.modTime = entry.ModifiedAt
|
||||
}
|
||||
if o.mode != entry.Mode {
|
||||
o.mode = decodeEntryMode(uint32(entry.Mode))
|
||||
}
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
const blockSize = 1 << 12
|
||||
|
||||
var offset, count int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(o.size)
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
if offset > o.size {
|
||||
offset = o.size
|
||||
}
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
} else if count+offset > o.size {
|
||||
count = o.size - offset
|
||||
}
|
||||
fs.Debugf(o, "Open: remote: %q offset: %d count: %d", o.remote, offset, count)
|
||||
|
||||
if count == 0 {
|
||||
return ioutil.NopCloser(bytes.NewReader(nil)), nil
|
||||
}
|
||||
offsetBlocks, offsetRest := offset/blockSize, offset%blockSize
|
||||
countBlocks := (count-1)/blockSize + 1
|
||||
|
||||
conn, err := o.fs.device.execCommand(fmt.Sprintf("sh -c 'dd \"if=$0\" bs=%d skip=%d count=%d 2>/dev/null'", blockSize, offsetBlocks, countBlocks), path.Join(o.fs.root, o.remote))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &adbReader{
|
||||
ReadCloser: readers.NewLimitedReadCloser(conn, count+offsetRest),
|
||||
skip: offsetRest,
|
||||
expected: count,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
//
|
||||
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
|
||||
// return an error or update the object properly (rather than e.g. calling panic).
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
for _, option := range options {
|
||||
if option.Mandatory() {
|
||||
fs.Logf(option, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
written, err := o.writeToFile(path.Join(o.fs.root, o.remote), in, 0666, src.ModTime())
|
||||
if err != nil {
|
||||
if removeErr := o.Remove(); removeErr != nil {
|
||||
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
expected := src.Size()
|
||||
if expected == -1 {
|
||||
expected = written
|
||||
}
|
||||
for _, t := range []int64{100, 250, 500, 1000, 2500, 5000, 10000} {
|
||||
err = o.stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if o.size == expected {
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(o, "Invalid size after update, expected: %d got: %d", expected, o.size)
|
||||
time.Sleep(time.Duration(t) * time.Millisecond)
|
||||
}
|
||||
return o.stat()
|
||||
}
|
||||
|
||||
// Remove this object
|
||||
func (o *Object) Remove() error {
|
||||
p := path.Join(o.fs.root, o.remote)
|
||||
output, code, err := o.fs.device.execCommandWithExitCode("rm", p)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("rm %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "rm")
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Object) writeToFile(path string, rd io.Reader, perms os.FileMode, modeTime time.Time) (written int64, err error) {
|
||||
dst, err := o.fs.device.OpenWrite(path, perms, modeTime)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fs.CheckClose(dst, &err)
|
||||
return io.Copy(dst, rd)
|
||||
}
|
||||
|
||||
type statFunc func(*Object) error
|
||||
|
||||
func (o *Object) statTry() error {
|
||||
o.fs.statFuncMu.Lock()
|
||||
defer o.fs.statFuncMu.Unlock()
|
||||
|
||||
for _, f := range []statFunc{
|
||||
(*Object).statStatL, (*Object).statRealPath, (*Object).statReadLink,
|
||||
} {
|
||||
err := f(o)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "%s", err)
|
||||
} else {
|
||||
o.fs.statFunc = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Errorf("unable to resolve link target")
|
||||
}
|
||||
|
||||
const (
|
||||
statArgLc = "-Lc"
|
||||
statArgC = "-c"
|
||||
)
|
||||
|
||||
func (o *Object) statStatL() error {
|
||||
return o.statStatArg(statArgLc, path.Join(o.fs.root, o.remote))
|
||||
}
|
||||
|
||||
func (o *Object) statStatArg(arg, path string) error {
|
||||
output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("stat %s %s", arg, "%f,%s,%Y"), path)
|
||||
output = strings.TrimSpace(output)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("stat %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "stat")
|
||||
}
|
||||
|
||||
parts := strings.Split(output, ",")
|
||||
if len(parts) != 3 {
|
||||
return errors.Errorf("stat %q invalid output %q", o.remote, output)
|
||||
}
|
||||
|
||||
mode, err := strconv.ParseUint(parts[0], 16, 32)
|
||||
if err != nil {
|
||||
return errors.Errorf("stat %q invalid output %q", o.remote, output)
|
||||
}
|
||||
size, err := strconv.ParseUint(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
return errors.Errorf("stat %q invalid output %q", o.remote, output)
|
||||
}
|
||||
modTime, err := strconv.ParseInt(parts[2], 10, 64)
|
||||
if err != nil {
|
||||
return errors.Errorf("stat %q invalid output %q", o.remote, output)
|
||||
}
|
||||
|
||||
o.size = int64(size)
|
||||
o.modTime = time.Unix(modTime, 0)
|
||||
o.mode = decodeEntryMode(uint32(mode))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) statReadLink() error {
|
||||
p := path.Join(o.fs.root, o.remote)
|
||||
output, code, err := o.fs.device.execCommandWithExitCode("readlink -f", p)
|
||||
output = strings.TrimSuffix(output, "\n")
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("readlink %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "readlink")
|
||||
}
|
||||
return o.statStatArg(statArgC, output)
|
||||
}
|
||||
func (o *Object) statRealPath() error {
|
||||
p := path.Join(o.fs.root, o.remote)
|
||||
output, code, err := o.fs.device.execCommandWithExitCode("realpath", p)
|
||||
output = strings.TrimSuffix(output, "\n")
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("realpath %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "realpath")
|
||||
}
|
||||
return o.statStatArg(statArgC, output)
|
||||
}
|
||||
|
||||
type touchFunc func(*Object, time.Time) error
|
||||
|
||||
func (o *Object) touchTry(t time.Time) error {
|
||||
o.fs.touchFuncMu.Lock()
|
||||
defer o.fs.touchFuncMu.Unlock()
|
||||
|
||||
for _, f := range []touchFunc{
|
||||
(*Object).touchCmd, (*Object).touchCd,
|
||||
} {
|
||||
err := f(o, t)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "%s", err)
|
||||
} else {
|
||||
o.fs.touchFunc = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Errorf("unable to resolve link target")
|
||||
}
|
||||
|
||||
const (
|
||||
touchArgCmd = "-cmd"
|
||||
touchArgCd = "-cd"
|
||||
)
|
||||
|
||||
func (o *Object) touchCmd(t time.Time) error {
|
||||
return o.touchStatArg(touchArgCmd, path.Join(o.fs.root, o.remote), t)
|
||||
}
|
||||
func (o *Object) touchCd(t time.Time) error {
|
||||
return o.touchStatArg(touchArgCd, path.Join(o.fs.root, o.remote), t)
|
||||
}
|
||||
|
||||
func (o *Object) touchStatArg(arg, path string, t time.Time) error {
|
||||
output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("touch %s %s", arg, t.Format(time.RFC3339Nano)), path)
|
||||
output = strings.TrimSpace(output)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("touch %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "touch")
|
||||
}
|
||||
|
||||
err = o.stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if diff, ok := checkTimeEqualWithPrecision(t, o.modTime, o.fs.Precision()); !ok {
|
||||
return errors.Errorf("touch %q to %s was ineffective: %d", o.remote, t, diff)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
|
||||
dt := t0.Sub(t1)
|
||||
if dt >= precision || dt <= -precision {
|
||||
return dt, false
|
||||
}
|
||||
return dt, true
|
||||
}
|
||||
|
||||
func decodeEntryMode(entryMode uint32) os.FileMode {
|
||||
const (
|
||||
unixIFBLK = 0x6000
|
||||
unixIFMT = 0xf000
|
||||
unixIFCHR = 0x2000
|
||||
unixIFDIR = 0x4000
|
||||
unixIFIFO = 0x1000
|
||||
unixIFLNK = 0xa000
|
||||
unixIFREG = 0x8000
|
||||
unixIFSOCK = 0xc000
|
||||
unixISGID = 0x400
|
||||
unixISUID = 0x800
|
||||
unixISVTX = 0x200
|
||||
)
|
||||
|
||||
mode := os.FileMode(entryMode & 0777)
|
||||
switch entryMode & unixIFMT {
|
||||
case unixIFBLK:
|
||||
mode |= os.ModeDevice
|
||||
case unixIFCHR:
|
||||
mode |= os.ModeDevice | os.ModeCharDevice
|
||||
case unixIFDIR:
|
||||
mode |= os.ModeDir
|
||||
case unixIFIFO:
|
||||
mode |= os.ModeNamedPipe
|
||||
case unixIFLNK:
|
||||
mode |= os.ModeSymlink
|
||||
case unixIFREG:
|
||||
// nothing to do
|
||||
case unixIFSOCK:
|
||||
mode |= os.ModeSocket
|
||||
}
|
||||
if entryMode&unixISGID != 0 {
|
||||
mode |= os.ModeSetgid
|
||||
}
|
||||
if entryMode&unixISUID != 0 {
|
||||
mode |= os.ModeSetuid
|
||||
}
|
||||
if entryMode&unixISVTX != 0 {
|
||||
mode |= os.ModeSticky
|
||||
}
|
||||
return mode
|
||||
}
|
||||
|
||||
type execDevice struct {
|
||||
*adb.Device
|
||||
}
|
||||
|
||||
func (d *execDevice) execCommandWithExitCode(cmd string, arg string) (string, int, error) {
|
||||
cmdLine := fmt.Sprintf("sh -c '%s \"$0\"; echo :$?' '%s'", cmd, strings.Replace(arg, "'", "'\\''", -1))
|
||||
fs.Debugf("adb", "exec: %s", cmdLine)
|
||||
conn, err := d.execCommand(cmdLine)
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
}
|
||||
|
||||
resp, err := conn.ReadUntilEof()
|
||||
if err != nil {
|
||||
return "", -1, errors.Wrap(err, "ExecCommand")
|
||||
}
|
||||
|
||||
outStr := string(resp)
|
||||
idx := strings.LastIndexByte(outStr, ':')
|
||||
if idx == -1 {
|
||||
return outStr, -1, fmt.Errorf("adb shell aborted, can not parse exit code")
|
||||
}
|
||||
exitCode, _ := strconv.Atoi(strings.TrimSpace(outStr[idx+1:]))
|
||||
if exitCode != 0 {
|
||||
err = adb.ShellExitError{Command: cmdLine, ExitCode: exitCode}
|
||||
}
|
||||
return outStr[:idx], exitCode, err
|
||||
}
|
||||
|
||||
func (d *execDevice) execCommand(cmd string, args ...string) (*wire.Conn, error) {
|
||||
cmd = prepareCommandLineEscaped(cmd, args...)
|
||||
conn, err := d.Dial()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "ExecCommand")
|
||||
}
|
||||
defer func() {
|
||||
if err != nil && conn != nil {
|
||||
_ = conn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
req := fmt.Sprintf("exec:%s", cmd)
|
||||
|
||||
if err = conn.SendMessage([]byte(req)); err != nil {
|
||||
return nil, errors.Wrap(err, "ExecCommand")
|
||||
}
|
||||
if _, err = conn.ReadStatus(req); err != nil {
|
||||
return nil, errors.Wrap(err, "ExecCommand")
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func prepareCommandLineEscaped(cmd string, args ...string) string {
|
||||
for i, arg := range args {
|
||||
args[i] = fmt.Sprintf("'%s'", strings.Replace(arg, "'", "'\\''", -1))
|
||||
}
|
||||
|
||||
// Prepend the command to the args array.
|
||||
if len(args) > 0 {
|
||||
cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
type adbReader struct {
|
||||
io.ReadCloser
|
||||
skip int64
|
||||
read int64
|
||||
expected int64
|
||||
}
|
||||
|
||||
func (r *adbReader) Read(b []byte) (n int, err error) {
|
||||
n, err = r.ReadCloser.Read(b)
|
||||
if s := r.skip; n > 0 && s > 0 {
|
||||
_n := int64(n)
|
||||
if _n <= s {
|
||||
r.skip -= _n
|
||||
return r.Read(b)
|
||||
}
|
||||
r.skip = 0
|
||||
copy(b, b[s:n])
|
||||
n -= int(s)
|
||||
}
|
||||
r.read += int64(n)
|
||||
if err == io.EOF && r.read < r.expected {
|
||||
fs.Debugf("adb", "Read: read: %d expected: %d n: %d", r.read, r.expected, n)
|
||||
return n, io.ErrUnexpectedEOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
backend/adb/adb_test.go (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
// Test ADB filesystem interface
|
||||
package adb_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/adb"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestAdb:/data/local/tmp",
|
||||
NilObject: (*adb.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: "TestAdb", Key: "copy_links", Value: "true"},
|
||||
},
|
||||
})
|
||||
}
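Taken together, the backend registers under the type `adb` with the options shown above (`serial`, `host`, `port`, `executable`, `copy_links`), and the integration test points `TestAdb:` at `/data/local/tmp`. A minimal command-line sketch of creating and using such a remote follows; the remote name `android` and the device paths are placeholders, not part of this change:

```sh
# Create a remote of type "adb". Leave "serial" unset for auto selection;
# NewFs returns an error if several devices are attached and no serial is given.
rclone config create android adb host localhost port 5037

# List and copy files from the device via the ADB server.
rclone ls android:/sdcard/DCIM
rclone copy android:/sdcard/DCIM ./device-photos/
```

The test uses `/data/local/tmp` because that path is normally writable for the unprivileged adb shell user, and it enables `copy_links` so symlinked entries are resolved during listing.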
@@ -2,6 +2,7 @@ package all
|
||||
|
||||
import (
|
||||
// Active file systems
|
||||
_ "github.com/ncw/rclone/backend/adb"
|
||||
_ "github.com/ncw/rclone/backend/alias"
|
||||
_ "github.com/ncw/rclone/backend/amazonclouddrive"
|
||||
_ "github.com/ncw/rclone/backend/azureblob"
|
||||
|
||||
@@ -392,6 +392,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
|
||||
}
|
||||
|
||||
f.container = parts.ContainerName
|
||||
containerURL = azblob.NewContainerURL(*u, pipeline)
|
||||
} else {
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
|
||||
@@ -125,6 +125,14 @@ minimum size.`,
|
||||
Help: `Disable checksums for large (> upload cutoff) files`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "download_url",
|
||||
Help: `Custom endpoint for downloads.
|
||||
|
||||
This is usually set to a Cloudflare CDN URL as Backblaze offers
|
||||
free egress for data downloaded through the Cloudflare network.
|
||||
Leave blank if you want to use the endpoint provided by Backblaze.`,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -140,6 +148,7 @@ type Options struct {
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DisableCheckSum bool `config:"disable_checksum"`
|
||||
DownloadURL string `config:"download_url"`
|
||||
}
|
||||
|
||||
// Fs represents a remote b2 server
|
||||
@@ -1296,9 +1305,17 @@ var _ io.ReadCloser = &openFile{}
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: o.fs.info.DownloadURL,
|
||||
Options: options,
|
||||
}
|
||||
|
||||
// Use downloadUrl from backblaze if downloadUrl is not set
|
||||
// otherwise use the custom downloadUrl
|
||||
if o.fs.opt.DownloadURL == "" {
|
||||
opts.RootURL = o.fs.info.DownloadURL
|
||||
} else {
|
||||
opts.RootURL = o.fs.opt.DownloadURL
|
||||
}
|
||||
|
||||
// Download by id if set otherwise by name
|
||||
if o.id != "" {
|
||||
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
|
||||
|
||||
@@ -214,7 +214,7 @@ func shouldRetry(err error) (bool, error) {
|
||||
return true, err
|
||||
}
|
||||
// Keep old behavior for backward compatibility
|
||||
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
|
||||
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
|
||||
return true, err
|
||||
}
|
||||
return fserrors.ShouldRetry(err), err
|
||||
|
||||
@@ -1488,7 +1488,7 @@ func (o *Object) cancelUploadSession(url string) (err error) {
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||
if size <= 0 {
|
||||
panic("size passed into uploadMultipart must be > 0")
|
||||
return nil, errors.New("unknown-sized upload not supported")
|
||||
}
|
||||
|
||||
// Create upload session
|
||||
@@ -1535,7 +1535,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
|
||||
// This function will set modtime after uploading, which will create a new version for the remote file
|
||||
func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
|
||||
panic("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
|
||||
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
|
||||
}
|
||||
|
||||
fs.Debugf(o, "Starting singlepart upload")
|
||||
|
||||
@@ -161,7 +161,6 @@ type UserInfo struct {
|
||||
PublicLinkQuota int64 `json:"publiclinkquota"`
|
||||
Email string `json:"email"`
|
||||
UserID int `json:"userid"`
|
||||
Result int `json:"result"`
|
||||
Quota int64 `json:"quota"`
|
||||
TrashRevretentionDays int `json:"trashrevretentiondays"`
|
||||
Premium bool `json:"premium"`
|
||||
|
||||
@@ -346,7 +346,7 @@ func init() {
|
||||
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS,Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-west-1.dream.io",
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
Provider: "Dreamhost",
|
||||
}, {
|
||||
|
||||
@@ -69,7 +69,7 @@ type Prop struct {
|
||||
Status []string `xml:"DAV: status"`
|
||||
Name string `xml:"DAV: prop>displayname,omitempty"`
|
||||
Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
|
||||
IsCollection *int `xml:"DAV: prop>iscollection,omitempty"` // this is a Microsoft extension see #2716
|
||||
IsCollection *string `xml:"DAV: prop>iscollection,omitempty"` // this is a Microsoft extension see #2716
|
||||
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
|
||||
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
|
||||
Checksums []string `xml:"prop>checksums>checksum,omitempty"`
|
||||
|
||||
@@ -173,9 +173,16 @@ func itemIsDir(item *api.Response) bool {
|
||||
fs.Debugf(nil, "Unknown resource type %q/%q on %q", t.Space, t.Local, item.Props.Name)
|
||||
}
|
||||
// the iscollection prop is a Microsoft extension, but if present it is a reliable indicator
|
||||
// if the above check failed - see #2716
|
||||
// if the above check failed - see #2716. This can be an integer or a boolean - see #2964
|
||||
if t := item.Props.IsCollection; t != nil {
|
||||
return *t != 0
|
||||
switch x := strings.ToLower(*t); x {
|
||||
case "0", "false":
|
||||
return false
|
||||
case "1", "true":
|
||||
return true
|
||||
default:
|
||||
fs.Debugf(nil, "Unknown value %q for IsCollection", x)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ github-release release \
|
||||
--name "rclone" \
|
||||
--description "Rclone - rsync for cloud storage. Sync files to and from many cloud storage providers."
|
||||
|
||||
for build in `ls build | grep -v current`; do
|
||||
for build in `ls build | grep -v current | grep -v testbuilds`; do
|
||||
echo "Uploading ${build}"
|
||||
base="${build%.*}"
|
||||
parts=(${base//-/ })
|
||||
|
||||
@@ -37,7 +37,7 @@ documentation, changelog and configuration walkthroughs.
|
||||
|
||||
const (
|
||||
bashCompletionFunc = `
|
||||
__custom_func() {
|
||||
__rclone_custom_func() {
|
||||
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
|
||||
local cur cword prev words
|
||||
if declare -F _init_completion > /dev/null; then
|
||||
|
||||
cmd/info/info.go (123 lines changed)
@@ -21,11 +21,22 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type position int
|
||||
|
||||
const (
|
||||
positionMiddle position = 1 << iota
|
||||
positionLeft
|
||||
positionRight
|
||||
positionNone position = 0
|
||||
positionAll position = positionRight<<1 - 1
|
||||
)
|
||||
|
||||
var (
|
||||
checkNormalization bool
|
||||
checkControl bool
|
||||
checkLength bool
|
||||
checkStreaming bool
|
||||
positionList = []position{positionMiddle, positionLeft, positionRight}
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -59,7 +70,7 @@ a bit of go code for each one.
|
||||
type results struct {
|
||||
f fs.Fs
|
||||
mu sync.Mutex
|
||||
charNeedsEscaping map[rune]bool
|
||||
stringNeedsEscaping map[string]position
|
||||
maxFileLength int
|
||||
canWriteUnnormalized bool
|
||||
canReadUnnormalized bool
|
||||
@@ -69,8 +80,8 @@ type results struct {
|
||||
|
||||
func newResults(f fs.Fs) *results {
|
||||
return &results{
|
||||
f: f,
|
||||
charNeedsEscaping: make(map[rune]bool),
|
||||
f: f,
|
||||
stringNeedsEscaping: make(map[string]position),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -79,13 +90,13 @@ func (r *results) Print() {
|
||||
fmt.Printf("// %s\n", r.f.Name())
|
||||
if checkControl {
|
||||
escape := []string{}
|
||||
for c, needsEscape := range r.charNeedsEscaping {
|
||||
if needsEscape {
|
||||
for c, needsEscape := range r.stringNeedsEscaping {
|
||||
if needsEscape != positionNone {
|
||||
escape = append(escape, fmt.Sprintf("0x%02X", c))
|
||||
}
|
||||
}
|
||||
sort.Strings(escape)
|
||||
fmt.Printf("charNeedsEscaping = []byte{\n")
|
||||
fmt.Printf("stringNeedsEscaping = []byte{\n")
|
||||
fmt.Printf("\t%s\n", strings.Join(escape, ", "))
|
||||
fmt.Printf("}\n")
|
||||
}
|
||||
@@ -130,20 +141,45 @@ func (r *results) checkUTF8Normalization() {
|
||||
}
|
||||
}
|
||||
|
||||
// check we can write file with the rune passed in
|
||||
func (r *results) checkChar(c rune) {
|
||||
fs.Infof(r.f, "Writing file 0x%02X", c)
|
||||
path := fmt.Sprintf("0x%02X-%c-", c, c)
|
||||
_, err := r.writeFile(path)
|
||||
escape := false
|
||||
if err != nil {
|
||||
fs.Infof(r.f, "Couldn't write file 0x%02X", c)
|
||||
escape = true
|
||||
} else {
|
||||
fs.Infof(r.f, "OK writing file 0x%02X", c)
|
||||
func (r *results) checkStringPositions(s string) {
|
||||
fs.Infof(r.f, "Writing position file 0x%0X", s)
|
||||
positionError := positionNone
|
||||
|
||||
for _, pos := range positionList {
|
||||
path := ""
|
||||
switch pos {
|
||||
case positionMiddle:
|
||||
path = fmt.Sprintf("position-middle-%0X-%s-", s, s)
|
||||
case positionLeft:
|
||||
path = fmt.Sprintf("%s-position-left-%0X", s, s)
|
||||
case positionRight:
|
||||
path = fmt.Sprintf("position-right-%0X-%s", s, s)
|
||||
default:
|
||||
panic("invalid position: " + pos.String())
|
||||
}
|
||||
_, writeErr := r.writeFile(path)
|
||||
if writeErr != nil {
|
||||
fs.Infof(r.f, "Writing %s position file 0x%0X Error: %s", pos.String(), s, writeErr)
|
||||
} else {
|
||||
fs.Infof(r.f, "Writing %s position file 0x%0X OK", pos.String(), s)
|
||||
}
|
||||
obj, getErr := r.f.NewObject(path)
|
||||
if getErr != nil {
|
||||
fs.Infof(r.f, "Getting %s position file 0x%0X Error: %s", pos.String(), s, getErr)
|
||||
} else {
|
||||
if obj.Size() != 50 {
|
||||
fs.Infof(r.f, "Getting %s position file 0x%0X Invalid Size: %d", pos.String(), s, obj.Size())
|
||||
} else {
|
||||
fs.Infof(r.f, "Getting %s position file 0x%0X OK", pos.String(), s)
|
||||
}
|
||||
}
|
||||
if writeErr != nil || getErr != nil {
|
||||
positionError += pos
|
||||
}
|
||||
}
|
||||
|
||||
r.mu.Lock()
|
||||
r.charNeedsEscaping[c] = escape
|
||||
r.stringNeedsEscaping[s] = positionError
|
||||
r.mu.Unlock()
|
||||
}
|
||||
|
||||
@@ -157,19 +193,28 @@ func (r *results) checkControls() {
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
for i := rune(0); i < 128; i++ {
|
||||
s := string(i)
|
||||
if i == 0 || i == '/' {
|
||||
// We're not even going to check NULL or /
|
||||
r.charNeedsEscaping[i] = true
|
||||
r.stringNeedsEscaping[s] = positionAll
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
c := i
|
||||
go func() {
|
||||
go func(s string) {
|
||||
defer wg.Done()
|
||||
token := <-tokens
|
||||
r.checkChar(c)
|
||||
r.checkStringPositions(s)
|
||||
tokens <- token
|
||||
}()
|
||||
}(s)
|
||||
}
|
||||
for _, s := range []string{"\", "\xBF", "\xFE"} {
|
||||
wg.Add(1)
|
||||
go func(s string) {
|
||||
defer wg.Done()
|
||||
token := <-tokens
|
||||
r.checkStringPositions(s)
|
||||
tokens <- token
|
||||
}(s)
|
||||
}
|
||||
wg.Wait()
|
||||
fs.Infof(r.f, "Done trying to create control character file names")
|
||||
@@ -268,3 +313,35 @@ func readInfo(f fs.Fs) error {
|
||||
r.Print()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e position) String() string {
|
||||
switch e {
|
||||
case positionNone:
|
||||
return "none"
|
||||
case positionAll:
|
||||
return "all"
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if e&positionMiddle != 0 {
|
||||
buf.WriteString("middle")
|
||||
e &= ^positionMiddle
|
||||
}
|
||||
if e&positionLeft != 0 {
|
||||
if buf.Len() != 0 {
|
||||
buf.WriteRune(',')
|
||||
}
|
||||
buf.WriteString("left")
|
||||
e &= ^positionLeft
|
||||
}
|
||||
if e&positionRight != 0 {
|
||||
if buf.Len() != 0 {
|
||||
buf.WriteRune(',')
|
||||
}
|
||||
buf.WriteString("right")
|
||||
e &= ^positionRight
|
||||
}
|
||||
if e != positionNone {
|
||||
panic("invalid position")
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
cmd/info/process.sh (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
set -euo pipefail
|
||||
|
||||
for f in info-*.log; do
|
||||
for pos in middle left right; do
|
||||
egrep -oe " Writing $pos position file [^ ]* \w+" $f | sort | cut -d' ' -f 7 > $f.write_$pos
|
||||
egrep -oe " Getting $pos position file [^ ]* \w+" $f | sort | cut -d' ' -f 7 > $f.get_$pos
|
||||
done
|
||||
{
|
||||
echo "${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}"
|
||||
echo "Write\tWrite\tWrite\tGet\tGet\tGet"
|
||||
echo "Mid\tLeft\tRight\tMid\tLeft\tRight"
|
||||
paste $f.write_{middle,left,right} $f.get_{middle,left,right}
|
||||
} > $f.csv
|
||||
done
|
||||
|
||||
for f in info-*.list; do
|
||||
for pos in middle left right; do
|
||||
cat $f | perl -lne 'print $1 if /^\s+[0-9]+\s+(.*)/' | grep -a "position-$pos-" | sort > $f.$pos
|
||||
done
|
||||
{
|
||||
echo "${${f%.list}#info-}\t${${f%.list}#info-}\t${${f%.list}#info-}"
|
||||
echo "List\tList\tList"
|
||||
echo "Mid\tLeft\tRight"
|
||||
for e in 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F BF EFBCBC FE; do
|
||||
echo -n $(perl -lne 'print "'$e'-$1" if /^position-middle-'$e'-(.*)-/' $f.middle | tr -d "\t\r" | grep -a . || echo Miss)
|
||||
echo -n "\t"
|
||||
echo -n $(perl -lne 'print "'$e'-$1" if /^(.*)-position-left-'$e'/' $f.left | tr -d "\t\r" | grep -a . || echo Miss)
|
||||
echo -n "\t"
|
||||
echo $(perl -lne 'print "'$e'-$1" if /^position-right-'$e'-(.*)/' $f.right | tr -d "\t\r" | grep -a . || echo Miss)
|
||||
# echo -n $(grep -a "position-middle-$e-" $f.middle | tr -d "\t\r" || echo Miss)"\t"
|
||||
# echo -n $(grep -a "position-left-$e" $f.left | tr -d "\t\r" || echo Miss)"\t"
|
||||
# echo $(grep -a "position-right-$e-" $f.right | tr -d "\t\r" || echo Miss)
|
||||
done
|
||||
} > $f.csv
|
||||
done
|
||||
|
||||
for f in info-*.list; do
|
||||
paste ${f%.list}.log.csv $f.csv > ${f%.list}.full.csv
|
||||
done
|
||||
paste *.full.csv > info-complete.csv
|
||||
cmd/info/test.cmd (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
rclone.exe purge info
|
||||
rclone.exe info -vv info > info-LocalWindows.log 2>&1
|
||||
rclone.exe ls -vv info > info-LocalWindows.list 2>&1
|
||||
cmd/info/test.sh (new executable file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
#!/usr/bin/env zsh
|
||||
#
|
||||
# example usage:
|
||||
# $GOPATH/src/github.com/ncw/rclone/cmd/info/test.sh --list | \
|
||||
# parallel -P20 $GOPATH/src/github.com/ncw/rclone/cmd/info/test.sh
|
||||
|
||||
export PATH=$GOPATH/src/github.com/ncw/rclone:$PATH
|
||||
|
||||
typeset -A allRemotes
|
||||
allRemotes=(
|
||||
TestAmazonCloudDrive '--low-level-retries=2 --checkers=5'
|
||||
TestB2 ''
|
||||
TestBox ''
|
||||
TestDrive '--tpslimit=5'
|
||||
TestCrypt ''
|
||||
TestDropbox '--checkers=1'
|
||||
TestJottacloud ''
|
||||
TestMega ''
|
||||
TestOneDrive ''
|
||||
TestOpenDrive '--low-level-retries=2 --checkers=5'
|
||||
TestPcloud '--low-level-retries=2 --timeout=15s'
|
||||
TestS3 ''
|
||||
Local ''
|
||||
)
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
if [[ $# -eq 0 ]]; then
|
||||
set -- ${(k)allRemotes[@]}
|
||||
elif [[ $1 = --list ]]; then
|
||||
printf '%s\n' ${(k)allRemotes[@]}
|
||||
exit 0
|
||||
fi
|
||||
|
||||
for remote; do
|
||||
dir=$remote:infotest
|
||||
if [[ $remote = Local ]]; then
|
||||
dir=infotest
|
||||
fi
|
||||
rclone purge $dir || :
|
||||
rclone info -vv $dir ${=allRemotes[$remote]} &> info-$remote.log
|
||||
rclone ls -vv $dir &> info-$remote.list
|
||||
done
|
||||
@@ -236,3 +236,4 @@ Contributors
|
||||
* weetmuts <oehrstroem@gmail.com>
|
||||
* Jonathan <vanillajonathan@users.noreply.github.com>
|
||||
* James Carpenter <orbsmiv@users.noreply.github.com>
|
||||
* Vince <vince0villamora@gmail.com>
|
||||
|
||||
@@ -670,6 +670,24 @@ uploaded compressed files.
|
||||
There is no need to set this in normal operation, and doing so will
|
||||
decrease the network transfer efficiency of rclone.
|
||||
|
||||
### --no-traverse ###
|
||||
|
||||
The `--no-traverse` flag controls whether the destination file system
|
||||
is traversed when using the `copy` or `move` commands.
|
||||
`--no-traverse` is not compatible with `sync` and will be ignored if
|
||||
you supply it with `sync`.
|
||||
|
||||
If you are only copying a small number of files (or are filtering most
|
||||
of the files) and/or have a large number of files on the destination
|
||||
then `--no-traverse` will stop rclone listing the destination and save
|
||||
time.
|
||||
|
||||
However, if you are copying a large number of files, especially if you
|
||||
are doing a copy where lots of the files under consideration haven't
|
||||
changed and won't need copying then you shouldn't use `--no-traverse`.
|
||||
|
||||
See [rclone copy](/commands/rclone_copy/) for an example of how to use it.
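
As a concrete illustration of the case described above — a small source copied into a destination that already holds many files — here is a minimal sketch; the paths and remote name are placeholders:

```sh
# Copy a handful of new files without listing the whole destination first.
rclone copy --no-traverse /home/user/new-photos remote:photos
```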
### --no-update-modtime ###
|
||||
|
||||
When using this flag, rclone won't update modification times of remote
|
||||
@@ -991,6 +1009,47 @@ with this setting.
|
||||
|
||||
Prints the version number
|
||||
|
||||
SSL/TLS options
|
||||
---------------
|
||||
|
||||
The outgoing SSL/TLS connections rclone makes can be controlled with
these options. For example, this can be very useful with the HTTP or
WebDAV backends. Rclone HTTP servers have their own set of
configuration for SSL/TLS which you can find in their documentation.
|
||||
|
||||
### --ca-cert string
|
||||
|
||||
This loads the PEM encoded certificate authority certificate and uses
|
||||
it to verify the certificates of the servers rclone connects to.
|
||||
|
||||
If you have generated certificates signed with a local CA then you
|
||||
will need this flag to connect to servers using those certificates.
|
||||
|
||||
### --client-cert string
|
||||
|
||||
This loads the PEM encoded client side certificate.
|
||||
|
||||
This is used for [mutual TLS authentication](https://en.wikipedia.org/wiki/Mutual_authentication).
|
||||
|
||||
The `--client-key` flag is required too when using this.
|
||||
|
||||
### --client-key string
|
||||
|
||||
This loads the PEM encoded client side private key used for mutual TLS
|
||||
authentication. Used in conjunction with `--client-cert`.
|
|
||||
|
||||
`--no-check-certificate` controls whether a client verifies the
|
||||
server's certificate chain and host name.
|
||||
If `--no-check-certificate` is true, TLS accepts any certificate
|
||||
presented by the server and any host name in that certificate.
|
||||
In this mode, TLS is susceptible to man-in-the-middle attacks.
|
||||
|
||||
This option defaults to `false`.
|
||||
|
||||
**This should be used only for testing.**
|
||||
|
||||
Configuration Encryption
|
||||
------------------------
|
||||
Your configuration file contains information for logging in to
|
||||
@@ -1147,36 +1206,6 @@ use it.
|
||||
|
||||
Write memory profile to file. This can be analysed with `go tool pprof`.
|
||||
|
||||
### --no-check-certificate=true/false ###
|
||||
|
||||
`--no-check-certificate` controls whether a client verifies the
|
||||
server's certificate chain and host name.
|
||||
If `--no-check-certificate` is true, TLS accepts any certificate
|
||||
presented by the server and any host name in that certificate.
|
||||
In this mode, TLS is susceptible to man-in-the-middle attacks.
|
||||
|
||||
This option defaults to `false`.
|
||||
|
||||
**This should be used only for testing.**
|
||||
|
||||
### --no-traverse ###
|
||||
|
||||
The `--no-traverse` flag controls whether the destination file system
|
||||
is traversed when using the `copy` or `move` commands.
|
||||
`--no-traverse` is not compatible with `sync` and will be ignored if
|
||||
you supply it with `sync`.
|
||||
|
||||
If you are only copying a small number of files (or are filtering most
|
||||
of the files) and/or have a large number of files on the destination
|
||||
then `--no-traverse` will stop rclone listing the destination and save
|
||||
time.
|
||||
|
||||
However, if you are copying a large number of files, especially if you
|
||||
are doing a copy where lots of the files under consideration haven't
|
||||
changed and won't need copying then you shouldn't use `--no-traverse`.
|
||||
|
||||
See [rclone copy](/commands/rclone_copy/) for an example of how to use it.
|
||||
|
||||
Filtering
|
||||
---------
|
||||
|
||||
|
||||
@@ -201,8 +201,9 @@ func (s *StatsInfo) String() string {
|
||||
}
|
||||
dtRounded := dt - (dt % (time.Second / 10))
|
||||
|
||||
displaySpeed := speed
|
||||
if fs.Config.DataRateUnit == "bits" {
|
||||
speed = speed * 8
|
||||
displaySpeed *= 8
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -235,7 +236,7 @@ func (s *StatsInfo) String() string {
|
||||
fs.SizeSuffix(s.bytes),
|
||||
fs.SizeSuffix(totalSize).Unit("Bytes"),
|
||||
percent(s.bytes, totalSize),
|
||||
fs.SizeSuffix(speed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
|
||||
fs.SizeSuffix(displaySpeed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
|
||||
etaString(currentSize, totalSize, speed),
|
||||
xfrchkString,
|
||||
)
|
||||
|
||||
@@ -87,6 +87,9 @@ type ConfigInfo struct {
|
||||
Progress bool
|
||||
Cookie bool
|
||||
UseMmap bool
|
||||
CaCert string // Client Side CA
|
||||
ClientCert string // Client Side Cert
|
||||
ClientKey string // Client Side Key
|
||||
}
|
||||
|
||||
// NewConfig creates a new config with everything set to the default
|
||||
|
||||
@@ -89,6 +89,9 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.UseMmap, "use-mmap", "", fs.Config.UseMmap, "Use mmap allocator (see docs).")
|
||||
flags.StringVarP(flagSet, &fs.Config.CaCert, "ca-cert", "", fs.Config.CaCert, "CA certificate used to verify servers")
|
||||
flags.StringVarP(flagSet, &fs.Config.ClientCert, "client-cert", "", fs.Config.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth")
|
||||
flags.StringVarP(flagSet, &fs.Config.ClientKey, "client-key", "", fs.Config.ClientKey, "Client SSL private key (PEM) for mutual TLS auth")
|
||||
}
|
||||
|
||||
// SetFlags converts any flags into config which weren't straightforward
|
||||
|
||||
@@ -5,6 +5,9 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
@@ -130,7 +133,39 @@ func NewTransport(ci *fs.ConfigInfo) http.RoundTripper {
|
||||
t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost
|
||||
t.TLSHandshakeTimeout = ci.ConnectTimeout
|
||||
t.ResponseHeaderTimeout = ci.Timeout
|
||||
t.TLSClientConfig = &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify}
|
||||
|
||||
// TLS Config
|
||||
t.TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: ci.InsecureSkipVerify,
|
||||
}
|
||||
|
||||
// Load client certs
|
||||
if ci.ClientCert != "" || ci.ClientKey != "" {
|
||||
if ci.ClientCert == "" || ci.ClientKey == "" {
|
||||
log.Fatalf("Both --client-cert and --client-key must be set")
|
||||
}
|
||||
cert, err := tls.LoadX509KeyPair(ci.ClientCert, ci.ClientKey)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load --client-cert/--client-key pair: %v", err)
|
||||
}
|
||||
t.TLSClientConfig.Certificates = []tls.Certificate{cert}
|
||||
t.TLSClientConfig.BuildNameToCertificate()
|
||||
}
|
||||
|
||||
// Load CA cert
|
||||
if ci.CaCert != "" {
|
||||
caCert, err := ioutil.ReadFile(ci.CaCert)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read --ca-cert: %v", err)
|
||||
}
|
||||
caCertPool := x509.NewCertPool()
|
||||
ok := caCertPool.AppendCertsFromPEM(caCert)
|
||||
if !ok {
|
||||
log.Fatalf("Failed to add certificates from --ca-cert")
|
||||
}
|
||||
t.TLSClientConfig.RootCAs = caCertPool
|
||||
}
|
||||
|
||||
t.DisableCompression = ci.NoGzip
|
||||
t.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||
return dialContextTimeout(ctx, network, addr, ci)
|
||||
|
||||
@@ -283,7 +283,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec
|
||||
// If can't server side copy, do it manually
|
||||
if err == fs.ErrorCantCopy {
|
||||
var in0 io.ReadCloser
|
||||
in0, err = src.Open(hashOption)
|
||||
in0, err = newReOpen(src, hashOption, fs.Config.LowLevelRetries)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "failed to open source object")
|
||||
} else {
|
||||
|
||||
fs/operations/reopen.go (new file, 111 lines)
@@ -0,0 +1,111 @@
|
||||
package operations
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// reOpen is a wrapper for an object reader which reopens the stream on error
|
||||
type reOpen struct {
|
||||
mu sync.Mutex // mutex to protect the below
|
||||
src fs.Object // object to open
|
||||
hashOption *fs.HashesOption // option to pass to initial open
|
||||
rc io.ReadCloser // underlying stream
|
||||
read int64 // number of bytes read from this stream
|
||||
maxTries int // maximum number of retries
|
||||
tries int // number of retries we've had so far in this stream
|
||||
err error // if this is set then Read/Close calls will return it
|
||||
opened bool // if set then rc is valid and needs closing
|
||||
}
|
||||
|
||||
var (
|
||||
errorFileClosed = errors.New("file already closed")
|
||||
errorTooManyTries = errors.New("failed to reopen: too many retries")
|
||||
)
|
||||
|
||||
// newReOpen makes a handle which will reopen itself and seek to where it was on errors
|
||||
func newReOpen(src fs.Object, hashOption *fs.HashesOption, maxTries int) (rc io.ReadCloser, err error) {
|
||||
h := &reOpen{
|
||||
src: src,
|
||||
hashOption: hashOption,
|
||||
maxTries: maxTries,
|
||||
}
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
err = h.open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// open the underlying handle - call with lock held
|
||||
//
|
||||
// we don't retry here as the Open() call will itself have low level retries
|
||||
func (h *reOpen) open() error {
|
||||
var opts = make([]fs.OpenOption, 1)
|
||||
if h.read == 0 {
|
||||
// put hashOption on if reading from the start, ditch otherwise
|
||||
opts[0] = h.hashOption
|
||||
} else {
|
||||
// seek to the read point
|
||||
opts[0] = &fs.SeekOption{Offset: h.read}
|
||||
}
|
||||
h.tries++
|
||||
if h.tries > h.maxTries {
|
||||
h.err = errorTooManyTries
|
||||
} else {
|
||||
h.rc, h.err = h.src.Open(opts...)
|
||||
}
|
||||
if h.err != nil {
|
||||
if h.tries > 1 {
|
||||
fs.Debugf(h.src, "Reopen failed after %d bytes read: %v", h.read, h.err)
|
||||
}
|
||||
return h.err
|
||||
}
|
||||
h.opened = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read bytes retrying as necessary
|
||||
func (h *reOpen) Read(p []byte) (n int, err error) {
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
if h.err != nil {
|
||||
// return a previous error if there is one
|
||||
return n, h.err
|
||||
}
|
||||
n, err = h.rc.Read(p)
|
||||
if err != nil {
|
||||
h.err = err
|
||||
}
|
||||
h.read += int64(n)
|
||||
if err != nil && err != io.EOF {
|
||||
// close underlying stream
|
||||
h.opened = false
|
||||
_ = h.rc.Close()
|
||||
// reopen stream, clearing error if successful
|
||||
fs.Debugf(h.src, "Reopening on read failure after %d bytes: retry %d/%d: %v", h.read, h.tries, h.maxTries, err)
|
||||
if h.open() == nil {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close the stream
|
||||
func (h *reOpen) Close() error {
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
if !h.opened {
|
||||
return errorFileClosed
|
||||
}
|
||||
h.opened = false
|
||||
h.err = errorFileClosed
|
||||
return h.rc.Close()
|
||||
}
|
||||
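To make the intent of the new wrapper easier to follow, here is a minimal usage sketch. It is not part of this commit: the helper name copyWithReopen and the dst writer are invented for illustration, while newReOpen, fs.HashesOption, hash.NewHashSet and fs.Config.LowLevelRetries are taken from the code above.

package operations

import (
	"io"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/hash"
	"github.com/pkg/errors"
)

// copyWithReopen streams src into dst, letting the reOpen wrapper reopen and
// seek the source on transient read errors (sketch only, not upstream code).
func copyWithReopen(dst io.Writer, src fs.Object) (err error) {
	hashOption := &fs.HashesOption{Hashes: hash.NewHashSet(hash.MD5)}
	in, err := newReOpen(src, hashOption, fs.Config.LowLevelRetries)
	if err != nil {
		return errors.Wrap(err, "failed to open source object")
	}
	defer func() {
		if closeErr := in.Close(); err == nil {
			err = closeErr
		}
	}()
	// Each failed Read closes the underlying stream and reopens it with
	// fs.SeekOption at the last good offset until the budget of
	// LowLevelRetries opens is exhausted, after which errorTooManyTries
	// is returned.
	_, err = io.Copy(dst, in)
	return err
}
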
144	fs/operations/reopen_test.go	Normal file
@@ -0,0 +1,144 @@
package operations
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fstest/mockobject"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// check interface
|
||||
var _ io.ReadCloser = (*reOpen)(nil)
|
||||
|
||||
var errorTestError = errors.New("test error")
|
||||
|
||||
// this is a wrapper for an mockobject with a custom Open function
|
||||
//
|
||||
// breaks indicate the number of bytes to read before returning an
|
||||
// error
|
||||
type reOpenTestObject struct {
|
||||
fs.Object
|
||||
breaks []int64
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
//
|
||||
// This will break after reading the number of bytes in breaks
|
||||
func (o *reOpenTestObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
rc, err := o.Object.Open(options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(o.breaks) > 0 {
|
||||
// Pop a breakpoint off
|
||||
N := o.breaks[0]
|
||||
o.breaks = o.breaks[1:]
|
||||
// If 0 then return an error immediately
|
||||
if N == 0 {
|
||||
return nil, errorTestError
|
||||
}
|
||||
// Read N bytes then an error
|
||||
r := io.MultiReader(&io.LimitedReader{R: rc, N: N}, errorReader{errorTestError})
|
||||
// Wrap with Close in a new readCloser
|
||||
rc = readCloser{Reader: r, Closer: rc}
|
||||
}
|
||||
return rc, nil
|
||||
}
|
||||
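// Illustrative note, not part of the upstream test file: a breaks slice of
// []int64{2, 0} would make the first Open return a reader that errors after
// 2 bytes and the second Open fail outright, so the wrapper needs a third,
// unbroken Open to finish reading.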
|
||||
// Return an error only
|
||||
type errorReader struct {
|
||||
err error
|
||||
}
|
||||
|
||||
// Read returning an error
|
||||
func (er errorReader) Read(p []byte) (n int, err error) {
|
||||
return 0, er.err
|
||||
}
|
||||
|
||||
// Contents for the mock object
|
||||
var reOpenTestcontents = []byte("0123456789")
|
||||
|
||||
// Start the test with the given breaks
|
||||
func testReOpen(breaks []int64, maxRetries int) (io.ReadCloser, error) {
|
||||
srcOrig := mockobject.New("potato").WithContent(reOpenTestcontents, mockobject.SeekModeRegular)
|
||||
src := &reOpenTestObject{
|
||||
Object: srcOrig,
|
||||
breaks: breaks,
|
||||
}
|
||||
hashOption := &fs.HashesOption{Hashes: hash.NewHashSet(hash.MD5)}
|
||||
return newReOpen(src, hashOption, maxRetries)
|
||||
}
|
||||
|
||||
func TestReOpenBasics(t *testing.T) {
|
||||
// open
|
||||
h, err := testReOpen(nil, 10)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Check contents read correctly
|
||||
got, err := ioutil.ReadAll(h)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, reOpenTestcontents, got)
|
||||
|
||||
// Check read after end
|
||||
var buf = make([]byte, 1)
|
||||
n, err := h.Read(buf)
|
||||
assert.Equal(t, 0, n)
|
||||
assert.Equal(t, io.EOF, err)
|
||||
|
||||
// Check close
|
||||
assert.NoError(t, h.Close())
|
||||
|
||||
// Check double close
|
||||
assert.Equal(t, errorFileClosed, h.Close())
|
||||
|
||||
// Check read after close
|
||||
n, err = h.Read(buf)
|
||||
assert.Equal(t, 0, n)
|
||||
assert.Equal(t, errorFileClosed, err)
|
||||
}
|
||||
|
||||
func TestReOpenErrorAtStart(t *testing.T) {
|
||||
// open with immediate breaking
|
||||
h, err := testReOpen([]int64{0}, 10)
|
||||
assert.Equal(t, errorTestError, err)
|
||||
assert.Nil(t, h)
|
||||
}
|
||||
|
||||
func TestReOpenError(t *testing.T) {
|
||||
// open with a few break points but less than the max
|
||||
h, err := testReOpen([]int64{2, 1, 3}, 10)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// check contents
|
||||
got, err := ioutil.ReadAll(h)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, reOpenTestcontents, got)
|
||||
|
||||
// check close
|
||||
assert.NoError(t, h.Close())
|
||||
}
|
||||
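// Illustrative note, not part of the upstream test file: with breaks {2, 1, 3}
// the stream errors after cumulative offsets 2, 3 and 6, so four opens are
// needed in total; a retry budget of 10 easily covers that, which is why the
// test above reads all ten bytes of reOpenTestcontents. TestReOpenFail below
// uses the same breaks with a budget of only 3, which is why it stops after
// the first 6 bytes.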
|
||||
func TestReOpenFail(t *testing.T) {
|
||||
// open with a few break points but >= the max
|
||||
h, err := testReOpen([]int64{2, 1, 3}, 3)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// check contents
|
||||
got, err := ioutil.ReadAll(h)
|
||||
assert.Equal(t, errorTestError, err)
|
||||
assert.Equal(t, reOpenTestcontents[:6], got)
|
||||
|
||||
// check old error is returned
|
||||
var buf = make([]byte, 1)
|
||||
n, err := h.Read(buf)
|
||||
assert.Equal(t, 0, n)
|
||||
assert.Equal(t, errorTooManyTries, err)
|
||||
|
||||
// Check close
|
||||
assert.Equal(t, errorFileClosed, h.Close())
|
||||
}
|
||||
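The test above glues the limited reader back onto the original stream with a readCloser value that is not defined in either new file; presumably it is a small helper that already lives elsewhere in the operations package. A hedged sketch of what such a helper usually looks like:

// readCloser combines a replacement Reader with the original Closer so the
// wrapped value still satisfies io.ReadCloser (sketch of an assumed helper,
// not code from this commit).
type readCloser struct {
	io.Reader
	io.Closer
}
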
fs/version.go
@@ -1,4 +1,4 @@
package fs

// Version of rclone
var Version = "v1.46"
var Version = "v1.46-DEV"

59	go.mod
@@ -2,65 +2,60 @@ module github.com/ncw/rclone

require (
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
|
||||
cloud.google.com/go v0.33.1 // indirect
|
||||
cloud.google.com/go v0.36.0 // indirect
|
||||
github.com/Azure/azure-pipeline-go v0.1.8
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20181023070848-cf01652132cc
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20190123011202-457680cc0804
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
|
||||
github.com/Unknwon/goconfig v0.0.0-20181105214110-56bd8ab18619
|
||||
github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6
|
||||
github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4
|
||||
github.com/abbot/go-http-auth v0.4.0
|
||||
github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5
|
||||
github.com/aws/aws-sdk-go v1.15.81
|
||||
github.com/aws/aws-sdk-go v1.16.31
|
||||
github.com/billziss-gh/cgofuse v1.1.0
|
||||
github.com/coreos/bbolt v0.0.0-20180318001526-af9db2027c98
|
||||
github.com/coreos/bbolt v1.3.2
|
||||
github.com/cpuguy83/go-md2man v1.0.8 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/djherbis/times v1.1.0
|
||||
github.com/djherbis/times v1.2.0
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible
|
||||
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 // indirect
|
||||
github.com/goftp/server v0.0.0-20190111142836-88de73f463af
|
||||
github.com/google/go-querystring v1.0.0 // indirect
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jlaffaye/ftp v0.0.0-20181101112434-47f21d10f0ee
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect
|
||||
github.com/jlaffaye/ftp v0.0.0-20190126081051-8019e6774408
|
||||
github.com/jtolds/gls v4.2.1+incompatible // indirect
|
||||
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect
|
||||
github.com/kr/fs v0.1.0 // indirect
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.3 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.4 // indirect
|
||||
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
|
||||
github.com/ncw/swift v1.0.44
|
||||
github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed
|
||||
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d
|
||||
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pengsrc/go-shared v0.2.0 // indirect
|
||||
github.com/pkg/errors v0.8.0
|
||||
github.com/pkg/sftp v1.8.3
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/pkg/sftp v1.10.0
|
||||
github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46
|
||||
github.com/russross/blackfriday v1.5.2 // indirect
|
||||
github.com/sevlyar/go-daemon v0.1.4
|
||||
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 // indirect
|
||||
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd // indirect
|
||||
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect
|
||||
github.com/skratchdot/open-golang v0.0.0-20190104022628-a2dfa6d0dab6
|
||||
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 // indirect
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c // indirect
|
||||
github.com/spf13/cobra v0.0.3
|
||||
github.com/spf13/cobra v0.0.4-0.20190109003409-7547e83b2d85
|
||||
github.com/spf13/pflag v1.0.3
|
||||
github.com/stretchr/testify v1.2.2
|
||||
github.com/stretchr/testify v1.3.0
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20190205172012-55a226cf41da
|
||||
github.com/thinkhy/go-adb v0.0.0-20190123053734-b4b48de70418
|
||||
github.com/xanzy/ssh-agent v0.2.0
|
||||
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible
|
||||
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a
|
||||
golang.org/x/oauth2 v0.0.0-20181120190819-8f65e3013eba
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b
|
||||
golang.org/x/text v0.3.0
|
||||
go.etcd.io/bbolt v1.3.2 // indirect
|
||||
golang.org/x/crypto v0.0.0-20190208162236-193df9c0f06f
|
||||
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006
|
||||
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
|
||||
golang.org/x/sys v0.0.0-20190213121743-983097b1a8a3
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c
|
||||
google.golang.org/api v0.0.0-20181120235003-faade3cbb06a
|
||||
google.golang.org/appengine v1.3.0 // indirect
|
||||
google.golang.org/api v0.1.0
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
||||
gopkg.in/yaml.v2 v2.2.1
|
||||
gopkg.in/yaml.v2 v2.2.2
|
||||
)
|
||||
|
||||
replace github.com/thinkhy/go-adb v0.0.0-20190123053734-b4b48de70418 => ../../../github.com/thinkhy/go-adb
|
||||
|
||||
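The trailing replace directive is what ties this branch to a local checkout of go-adb: Go resolves the module from the relative path ../../../github.com/thinkhy/go-adb instead of downloading the tagged pseudo-version, so the build only works when that sibling working copy exists. Trimmed to the relevant lines (module path and version are taken from the diff above; the single-line require form is equivalent shorthand for the block form used there):

module github.com/ncw/rclone

require github.com/thinkhy/go-adb v0.0.0-20190123053734-b4b48de70418

replace github.com/thinkhy/go-adb v0.0.0-20190123053734-b4b48de70418 => ../../../github.com/thinkhy/go-adb
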
242	go.sum
@@ -1,152 +1,284 @@
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669 h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw=
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||
cloud.google.com/go v0.33.1 h1:fmJQWZ1w9PGkHR1YL/P7HloDvqlmKQ4Vpb7PC2e+aCk=
|
||||
cloud.google.com/go v0.33.1/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.36.0 h1:+aCSj7tOo2LODWVEuZDZeGCckdt6MlSF+X/rB3wUiS8=
|
||||
cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40=
|
||||
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
|
||||
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
|
||||
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
|
||||
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/Azure/azure-pipeline-go v0.1.8 h1:KmVRa8oFMaargVesEuuEoiLCQ4zCCwQ8QX/xg++KS20=
|
||||
github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20181023070848-cf01652132cc h1:BElWmFfsryQD72OcovStKpkIcd4e9ozSkdsTNQDSHGk=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20181023070848-cf01652132cc/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20190123011202-457680cc0804 h1:QjGHsWFbJyl312t0BtgkmZy2TTYA++FF0UakGbr3ZhQ=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20190123011202-457680cc0804/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Unknwon/goconfig v0.0.0-20181105214110-56bd8ab18619 h1:6X8iB881g299aNEv6KXrcjL31iLOH7yA6NXoQX+MbDg=
|
||||
github.com/Unknwon/goconfig v0.0.0-20181105214110-56bd8ab18619/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
|
||||
github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6 h1:UCQe3W9LxwL2ff5r0PqQfS6Oe5MCKpIH8twfK/dH9mw=
|
||||
github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
|
||||
github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4 h1:mK1/QgFPU4osbhjJ26B1w738kjQHaGJcon8uCLMS8fk=
|
||||
github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
|
||||
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
|
||||
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
|
||||
github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5 h1:lmyFvZXNGOmsKCYXNwzDLWafnxeewxsFwdsvTvSC1sg=
|
||||
github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5/go.mod h1:DGqLjaZ3ziKKNRt+U5Q9PLWJ52Q/4rxfaaH/b3QYKaE=
|
||||
github.com/aws/aws-sdk-go v1.15.81 h1:va7uoFaV9uKAtZ6BTmp1u7paoMsizYRRLvRuoC07nQ8=
|
||||
github.com/aws/aws-sdk-go v1.15.81/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/aws/aws-sdk-go v1.16.31 h1:bE4FW2uulhXiAaF4Guw0OzX9gBZ4iWvXWe6VT8Jxr28=
|
||||
github.com/aws/aws-sdk-go v1.16.31/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/billziss-gh/cgofuse v1.1.0 h1:tATn9ZDvuPcOVlvR4tJitGHgAqy1y18+4mKmRfdfjec=
|
||||
github.com/billziss-gh/cgofuse v1.1.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
|
||||
github.com/coreos/bbolt v0.0.0-20180318001526-af9db2027c98 h1:0gQU5Ebjs1V8Ow5bBzxZzr0peNjJILSkSb30IfZtshQ=
|
||||
github.com/coreos/bbolt v0.0.0-20180318001526-af9db2027c98/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/cheggaaa/pb v2.0.6+incompatible/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
|
||||
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/djherbis/times v1.1.0 h1:NFhBDODme0XNX+/5ETW9qL6v3Ty57psiXIQBrzzg44E=
|
||||
github.com/djherbis/times v1.1.0/go.mod h1:CGMZlo255K5r4Yw0b9RRfFQpM2y7uOmxg4jm9HsaVf8=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible h1:ZFvUIiBbGhDY5zF8yjLoWhUAYs7uDodUpbvTS5oelDE=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v5.0.0+incompatible h1:FQu9Ef2dkC8g2rQmcQmpXXeoRegXHODBfveKKZu6+e8=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v5.0.1-0.20181205034806-56e5f6595305+incompatible h1:4HSS6BiPqvgsn/zrwt6KOYY+mw153zmhvewZIRh1+Ds=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v5.0.1-0.20181205034806-56e5f6595305+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
|
||||
github.com/djherbis/times v1.2.0 h1:xANXjsC/iBqbO00vkWlYwPWgBgEVU6m6AFYg0Pic+Mc=
|
||||
github.com/djherbis/times v1.2.0/go.mod h1:CGMZlo255K5r4Yw0b9RRfFQpM2y7uOmxg4jm9HsaVf8=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible h1:9jnukMIowLSo3SY7+GTwxmYJv4QC0LxXbo97zHWCyoc=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 h1:cC0Hbb+18DJ4i6ybqDybvj4wdIDS4vnD0QEci98PgM8=
|
||||
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9/go.mod h1:GpOj6zuVBG3Inr9qjEnuVTgBlk2lZ1S9DcoFiXWyKss=
|
||||
github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1 h1:WjgeEHEDLGx56ndxS6FYi6qFjZGajSVHPuEPdpJ60cI=
|
||||
github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
|
||||
github.com/goftp/server v0.0.0-20190111142836-88de73f463af h1:PJxb1aA1z+Ohy2j28L92+ng9phXpZVFRFbPkfmJcRGo=
|
||||
github.com/goftp/server v0.0.0-20190111142836-88de73f463af/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jlaffaye/ftp v0.0.0-20181101112434-47f21d10f0ee h1:oCvgfeGIc6GipidJVyG0Hd9R/w6TO8bBYyJg15ZgJkw=
|
||||
github.com/jlaffaye/ftp v0.0.0-20181101112434-47f21d10f0ee/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||
github.com/jlaffaye/ftp v0.0.0-20190126081051-8019e6774408 h1:9AeqmB6KVEJ7GQU985MGQc7Mtxz1+C+JZkgqBnUWqMU=
|
||||
github.com/jlaffaye/ftp v0.0.0-20190126081051-8019e6774408/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro=
|
||||
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zXP2Ew+9VupSpfybr+TxmjdH0=
|
||||
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
|
||||
github.com/ncw/swift v1.0.42 h1:ztvRb6hs52IHOcaYt73f9lXYLIeIuWgdooRDhdyllGI=
|
||||
github.com/ncw/swift v1.0.42/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/ncw/swift v1.0.43 h1:TZn2l/bPV0CqG+/G5BFh/ROWnyX7dL2D0URaOjNQRsw=
|
||||
github.com/ncw/swift v1.0.43/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/ncw/swift v1.0.44 h1:EKvOTvUxElbpDWqxsyVaVGvc2IfuOqQnRmjnR2AGhQ4=
|
||||
github.com/ncw/swift v1.0.44/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed h1:bAVGG6B+R5qpSylrrA+BAMrzYkdAoiTaKPVxRB+4cyM=
|
||||
github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840=
|
||||
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
|
||||
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
|
||||
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/pengsrc/go-shared v0.2.0 h1:Ho86LhaXOYgv9FjBmIp5CO0LmaIj49H2HZhYh0+7uW8=
|
||||
github.com/pengsrc/go-shared v0.2.0/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/sftp v1.8.3 h1:9jSe2SxTM8/3bXZjtqnkgTBW+lA8db0knZJyns7gpBA=
|
||||
github.com/pkg/sftp v1.8.3/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/sftp v1.10.0 h1:DGA1KlA9esU6WcicH+P8PxFZOl15O6GYtab1cIJdOlE=
|
||||
github.com/pkg/sftp v1.10.0/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46 h1:w2CpS5muK+jyydnmlkqpAhzKmHmMBzBkfYUDjQNS1Dk=
|
||||
github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46/go.mod h1:U2bmx0hDj8EyDdcxmD5t3XHDnBFnyNNc22n1R4008eM=
|
||||
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sevlyar/go-daemon v0.1.4 h1:Ayxp/9SNHwPBjV+kKbnHl2ch6rhxTu08jfkGkoxgULQ=
|
||||
github.com/sevlyar/go-daemon v0.1.4/go.mod h1:6dJpPatBT9eUwM5VCw9Bt6CdX9Tk6UWvhW3MebLDRKE=
|
||||
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
|
||||
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
|
||||
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
|
||||
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
|
||||
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
|
||||
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
|
||||
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
|
||||
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
|
||||
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
|
||||
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
|
||||
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
|
||||
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo=
|
||||
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
|
||||
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
|
||||
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c h1:fyKiXKO1/I/B6Y2U8T7WdQGWzwehOuGIrljPtt7YTTI=
|
||||
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
|
||||
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
|
||||
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
|
||||
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
|
||||
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
|
||||
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
|
||||
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
|
||||
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
|
||||
github.com/skratchdot/open-golang v0.0.0-20190104022628-a2dfa6d0dab6 h1:cGT4dcuEyBwwu/v6tosyqcDp2yoIo/LwjMGixUvg3nU=
|
||||
github.com/skratchdot/open-golang v0.0.0-20190104022628-a2dfa6d0dab6/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
|
||||
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY=
|
||||
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
||||
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
||||
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.4-0.20190109003409-7547e83b2d85 h1:RghwryY75x76zKqO9v7NF+9lcmfW1/RNZBfqK4LSCKE=
|
||||
github.com/spf13/cobra v0.0.4-0.20190109003409-7547e83b2d85/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20180817194457-854bf31d998b h1:Yt/fB2INfWb29Vcya4X0BNCLmObKmDdt0o0IndFzEY8=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20180817194457-854bf31d998b/go.mod h1:ObGZcW2yPzFXEsaTZVMgLKhdUSHMYM3aEDE/u7YnfU8=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20190204171941-1b8ac3503be8 h1:pRD8qAFGq7u9wK/lTg64wkdpCZh4lsTvRoEzQWWLMGE=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20190204171941-1b8ac3503be8/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20190205172012-55a226cf41da h1:hWHAUYEtxnj8tb2pHj5WPGEIE8leSi/3cMND8fUjsBE=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20190205172012-55a226cf41da/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
|
||||
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
|
||||
github.com/yosemite-open/go-adb v0.0.0-20181206003817-d40962019194 h1:hQ7oP/X/5JR3gGKEEEZU3uihkDePXlFoTwr0XDu5CKg=
|
||||
github.com/yosemite-open/go-adb v0.0.0-20181206003817-d40962019194/go.mod h1:OoY1zUwKq/hv/6hBuQxzSRNu1XZ289eXaDNgoHa+3lU=
|
||||
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible h1:/Z0q3/eSMoPYAuRmhjWtuGSmVVciFC6hfm3yfCKuvz0=
|
||||
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible/go.mod h1:w6wqLDQ5bBTzxGJ55581UrSwLrsTAsdo9N6yX/8d9RY=
|
||||
golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869 h1:kkXA53yGe04D0adEYJwEVQjeBppL01Exg+fnMjfUraU=
|
||||
golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
||||
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613 h1:MQ/ZZiDsUapFFiMS+vzwXkCTeEKaum+Do5rINYJDmxc=
|
||||
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190208162236-193df9c0f06f h1:ETU2VEl7TnT5bl7IvuKEzTDpplg5wzGYsOCAPhdoEIg=
|
||||
golang.org/x/crypto v0.0.0-20190208162236-193df9c0f06f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/oauth2 v0.0.0-20181120190819-8f65e3013eba h1:YDkOrzGLLYybtuP6ZgebnO4OWYEYVMFSniazXsxrFN8=
|
||||
golang.org/x/oauth2 v0.0.0-20181120190819-8f65e3013eba/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80=
|
||||
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1 h1:VeAkjQVzKLmu+JnFcK96TPbkuaTIqwGGAzQ9hgwPjVg=
|
||||
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b h1:MQE+LT/ABUuuvEZ+YQAMSXindAdUh7slEmAkup74op4=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952 h1:FDfvYgoVsA7TTZSbgiqjAbfPbK47CNHdWl3h/PJtii0=
|
||||
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg=
|
||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190213121743-983097b1a8a3 h1:+KlxhGbYkFs8lMfwKn+2ojry1ID5eBSMXprS2u/wqCE=
|
||||
golang.org/x/sys v0.0.0-20190213121743-983097b1a8a3/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
google.golang.org/api v0.0.0-20181120235003-faade3cbb06a h1:yMfgT1baklxtECXVk3UtZBELVXtVhDbK3/7xLFkFypw=
|
||||
google.golang.org/api v0.0.0-20181120235003-faade3cbb06a/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI=
|
||||
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.3.0 h1:FBSsiFRMz3LBeXIomRnVzrQwSDj4ibvcRexLG0LZGQk=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||
google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
gopkg.in/VividCortex/ewma.v1 v1.1.1/go.mod h1:TekXuFipeiHWiAlO1+wSS23vTcyFau5u3rxXUSXj710=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v2 v2.0.6/go.mod h1:0CiZ1p8pvtxBlQpLXkHuUTpdJ1shm3OqCF1QugkjHL4=
|
||||
gopkg.in/fatih/color.v1 v1.7.0/go.mod h1:P7yosIhqIl/sX8J8UypY5M+dDpD2KmyfP5IRs5v/fo0=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/mattn/go-colorable.v0 v0.1.0/go.mod h1:BVJlBXzARQxdi3nZo6f6bnl5yR20/tOL6p+V0KejgSY=
|
||||
gopkg.in/mattn/go-isatty.v0 v0.0.4/go.mod h1:wt691ab7g0X4ilKZNmMII3egK0bTxl37fEn/Fwbd8gc=
|
||||
gopkg.in/mattn/go-runewidth.v0 v0.0.4/go.mod h1:BmXejnxvhwdaATwiJbB1vZ2dtXkQKZGu9yLFCZb4msQ=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
633	lib/encoder/encoder.go	Normal file
@@ -0,0 +1,633 @@
/*
|
||||
Translate file names for usage on restrictive storage systems
|
||||
|
||||
The restricted set of characters are mapped to a unicode equivalent version
|
||||
(most to their FULLWIDTH variant) to increase compatibility with other
|
||||
storage systems.
|
||||
See: http://unicode-search.net/unicode-namesearch.pl?term=FULLWIDTH
|
||||
|
||||
Encoders will also quote reserved characters to differentiate between
|
||||
the raw and encoded forms.
|
||||
*/
|
||||
|
||||
package encoder
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
// adding this to any printable ASCII character turns it into the
|
||||
// FULLWIDTH variant
|
||||
fullOffset = 0xFEE0
|
||||
// the first rune of the SYMBOL FOR block for control characters
|
||||
symbolOffset = '␀' // SYMBOL FOR NULL
|
||||
// QuoteRune is the rune used for quoting reserved characters
|
||||
QuoteRune = '‛' // SINGLE HIGH-REVERSED-9 QUOTATION MARK
|
||||
// EncodeStandard contains the flags used for the Standard Encoder
|
||||
EncodeStandard = EncodeZero | EncodeSlash | EncodeCtl | EncodeDel
|
||||
// Standard defines the encoding that is used for paths in- and output by rclone.
|
||||
//
|
||||
// List of replaced characters:
|
||||
// (0x00) -> '␀' // SYMBOL FOR NULL
|
||||
// / (slash) -> '/' // FULLWIDTH SOLIDUS
|
||||
Standard = MultiEncoder(EncodeStandard)
|
||||
)
|
||||
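// Illustrative note, not part of the upstream file: with EncodeStandard the
// encoder maps NUL to '␀', '/' to the FULLWIDTH SOLIDUS '／', control
// characters to their SYMBOL FOR equivalents and DEL to '␡', and the mapping
// round-trips through Decode. A sketch of the expected behaviour:
//
//	enc := Standard.Encode("dir/file\x00") // "dir／file␀"
//	dec := Standard.Decode(enc)            // "dir/file\x00" again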
|
||||
// Possible flags for the MultiEncoder
|
||||
const (
|
||||
EncodeZero uint = 0 // NUL(0x00)
|
||||
EncodeSlash uint = 1 << iota // /
|
||||
EncodeWin // :?"*<>|
|
||||
EncodeBackSlash // \
|
||||
EncodeHashPercent // #%
|
||||
EncodeDel // DEL(0x7F)
|
||||
EncodeCtl // CTRL(0x01-0x1F)
|
||||
EncodeLeftSpace // Leading SPACE
|
||||
EncodeLeftTilde // Leading ~
|
||||
EncodeRightSpace // Trailing SPACE
|
||||
EncodeRightPeriod // Trailing .
|
||||
EncodeInvalidUtf8 // Invalid UTF-8 bytes
|
||||
)
|
||||
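// Illustrative note, not part of the upstream file: a backend combines these
// flags with bitwise or to describe its own restrictions, for example a
// Windows-like remote that must also avoid :?"*<>| and backslash:
//
//	var winEncoder = MultiEncoder(EncodeStandard | EncodeWin | EncodeBackSlash)
//	_ = winEncoder.Encode(`a:b\c`) // reserved runes become FULLWIDTH variants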
|
||||
// Encoder can transform names to and from the original and translated version.
|
||||
type Encoder interface {
|
||||
// Encode takes a raw name and substitutes any reserved characters and
|
||||
// patterns in it
|
||||
Encode(string) string
|
||||
// Decode takes a name and undoes any substitutions made by Encode
|
||||
Decode(string) string
|
||||
|
||||
// FromStandardPath takes a / separated path in Standard encoding
|
||||
// and converts it to a / separated path in this encoding.
|
||||
FromStandardPath(string) string
|
||||
// FromStandardName takes name in Standard encoding and converts
|
||||
// it in this encoding.
|
||||
FromStandardName(string) string
|
||||
// ToStandardPath takes a / separated path in this encoding
|
||||
// and converts it to a / separated path in Standard encoding.
|
||||
ToStandardPath(string) string
|
||||
// ToStandardName takes name in this encoding and converts
|
||||
// it in Standard encoding.
|
||||
ToStandardName(string) string
|
||||
}
|
||||
|
||||
// MultiEncoder is a configurable Encoder. The Encode* constants in this
|
||||
// package can be combined using bitwise or (|) to enable handling of multiple
|
||||
// character classes
|
||||
type MultiEncoder uint
|
||||
|
||||
// Encode takes a raw name and substitutes any reserved characters and
|
||||
// patterns in it
|
||||
func (mask MultiEncoder) Encode(in string) string {
|
||||
var (
|
||||
encodeWin = uint(mask)&EncodeWin != 0
|
||||
encodeSlash = uint(mask)&EncodeSlash != 0
|
||||
encodeBackSlash = uint(mask)&EncodeBackSlash != 0
|
||||
encodeHashPercent = uint(mask)&EncodeHashPercent != 0
|
||||
encodeDel = uint(mask)&EncodeDel != 0
|
||||
encodeCtl = uint(mask)&EncodeCtl != 0
|
||||
encodeLeftSpace = uint(mask)&EncodeLeftSpace != 0
|
||||
encodeLeftTilde = uint(mask)&EncodeLeftTilde != 0
|
||||
encodeRightSpace = uint(mask)&EncodeRightSpace != 0
|
||||
encodeRightPeriod = uint(mask)&EncodeRightPeriod != 0
|
||||
encodeInvalidUnicode = uint(mask)&EncodeInvalidUtf8 != 0
|
||||
)
|
||||
|
||||
// handle prefix only replacements
|
||||
prefix := ""
|
||||
if encodeLeftSpace && len(in) > 0 { // Leading SPACE
|
||||
if in[0] == ' ' {
|
||||
prefix, in = "␠", in[1:] // SYMBOL FOR SPACE
|
||||
} else if r, l := utf8.DecodeRuneInString(in); r == '␠' { // SYMBOL FOR SPACE
|
||||
prefix, in = string(QuoteRune)+"␠", in[l:] // SYMBOL FOR SPACE
|
||||
}
|
||||
}
|
||||
if encodeLeftTilde && len(in) > 0 { // Leading ~
|
||||
if in[0] == '~' {
|
||||
prefix, in = string('~'+fullOffset), in[1:] // FULLWIDTH TILDE
|
||||
} else if r, l := utf8.DecodeRuneInString(in); r == '~'+fullOffset {
|
||||
prefix, in = string(QuoteRune)+string('~'+fullOffset), in[l:] // FULLWIDTH TILDE
|
||||
}
|
||||
}
|
||||
// handle suffix only replacements
|
||||
suffix := ""
|
||||
if encodeRightSpace && len(in) > 0 { // Trailing SPACE
|
||||
if in[len(in)-1] == ' ' {
|
||||
suffix, in = "␠", in[:len(in)-1] // SYMBOL FOR SPACE
|
||||
} else if r, l := utf8.DecodeLastRuneInString(in); r == '␠' {
|
||||
suffix, in = string(QuoteRune)+"␠", in[:len(in)-l] // SYMBOL FOR SPACE
|
||||
}
|
||||
}
|
||||
if encodeRightPeriod && len(in) > 0 { // Trailing .
|
||||
if in[len(in)-1] == '.' {
|
||||
suffix, in = ".", in[:len(in)-1] // FULLWIDTH FULL STOP
|
||||
} else if r, l := utf8.DecodeLastRuneInString(in); r == '.' {
|
||||
suffix, in = string(QuoteRune)+".", in[:len(in)-l] // FULLWIDTH FULL STOP
|
||||
}
|
||||
}
|
||||
index := 0
|
||||
if prefix == "" && suffix == "" {
|
||||
// find the first rune which (most likely) needs to be replaced
|
||||
index = strings.IndexFunc(in, func(r rune) bool {
|
||||
switch r {
|
||||
case 0, '␀', QuoteRune, utf8.RuneError:
|
||||
return true
|
||||
}
|
||||
if encodeWin { // :?"*<>|
|
||||
switch r {
|
||||
case '*', '<', '>', '?', ':', '|', '"',
|
||||
'*', '<', '>', '?', ':', '|', '"':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if encodeSlash { // /
|
||||
switch r {
|
||||
case '/',
|
||||
'/':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if encodeBackSlash { // \
|
||||
switch r {
|
||||
case '\\',
|
||||
'\':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if encodeHashPercent { // #%
|
||||
switch r {
|
||||
case '#', '%',
|
||||
'#', '%':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if encodeDel { // DEL(0x7F)
|
||||
switch r {
|
||||
case rune(0x7F), '␡':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if encodeCtl { // CTRL(0x01-0x1F)
|
||||
if r >= 1 && r <= 0x1F {
|
||||
return true
|
||||
} else if r > symbolOffset && r <= symbolOffset+0x1F {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
}
|
||||
// nothing to replace, return input
|
||||
if index == -1 {
|
||||
return in
|
||||
}
|
||||
|
||||
var out bytes.Buffer
|
||||
out.Grow(len(in) + len(prefix) + len(suffix))
|
||||
out.WriteString(prefix)
|
||||
// copy the clean part of the input and skip it
|
||||
out.WriteString(in[:index])
|
||||
in = in[index:]
|
||||
|
||||
for i, r := range in {
|
||||
switch r {
|
||||
case 0:
|
||||
out.WriteRune(symbolOffset)
|
||||
continue
|
||||
case '␀', QuoteRune:
|
||||
out.WriteRune(QuoteRune)
|
||||
out.WriteRune(r)
|
||||
continue
|
||||
case utf8.RuneError:
|
||||
if encodeInvalidUnicode {
|
||||
// only encode invalid sequences and not utf8.RuneError
|
||||
if i+3 > len(in) || in[i:i+3] != string(utf8.RuneError) {
|
||||
_, l := utf8.DecodeRuneInString(in[i:])
|
||||
appendQuotedBytes(&out, in[i:i+l])
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// append the real bytes instead of utf8.RuneError
|
||||
_, l := utf8.DecodeRuneInString(in[i:])
|
||||
out.WriteString(in[i : i+l])
|
||||
continue
|
||||
}
|
||||
}
|
||||
if encodeWin { // :?"*<>|
|
||||
switch r {
|
||||
case '*', '<', '>', '?', ':', '|', '"':
|
||||
out.WriteRune(r + fullOffset)
|
||||
continue
|
||||
case '*', '<', '>', '?', ':', '|', '"':
|
||||
out.WriteRune(QuoteRune)
|
||||
out.WriteRune(r)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if encodeSlash { // /
|
||||
switch r {
|
||||
case '/':
|
||||
out.WriteRune(r + fullOffset)
|
||||
continue
|
||||
case '/':
|
||||
out.WriteRune(QuoteRune)
|
||||
out.WriteRune(r)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if encodeBackSlash { // \
|
||||
switch r {
|
||||
case '\\':
|
||||
out.WriteRune(r + fullOffset)
|
||||
continue
|
||||
case '\':
|
||||
out.WriteRune(QuoteRune)
|
||||
out.WriteRune(r)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if encodeHashPercent { // #%
|
||||
switch r {
|
||||
case '#', '%':
|
||||
out.WriteRune(r + fullOffset)
|
||||
continue
|
||||
case '#', '%':
|
||||
out.WriteRune(QuoteRune)
|
||||
out.WriteRune(r)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if encodeDel { // DEL(0x7F)
|
||||
switch r {
|
||||
case rune(0x7F):
|
||||
out.WriteRune('␡') // SYMBOL FOR DELETE
|
||||
continue
|
||||
case '␡':
|
||||
out.WriteRune(QuoteRune)
|
||||
out.WriteRune(r)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if encodeCtl { // CTRL(0x01-0x1F)
|
||||
if r >= 1 && r <= 0x1F {
|
||||
out.WriteRune('␀' + r) // SYMBOL FOR NULL
|
||||
continue
|
||||
} else if r > symbolOffset && r <= symbolOffset+0x1F {
|
||||
out.WriteRune(QuoteRune)
|
||||
out.WriteRune(r)
|
||||
continue
|
||||
}
|
||||
}
|
||||
out.WriteRune(r)
|
||||
}
|
||||
out.WriteString(suffix)
|
||||
return out.String()
|
||||
}
|
||||
|
||||
// Decode takes a name and undoes any substitutions made by Encode
|
||||
func (mask MultiEncoder) Decode(in string) string {
|
||||
var (
|
||||
encodeWin = uint(mask)&EncodeWin != 0
|
||||
encodeSlash = uint(mask)&EncodeSlash != 0
|
||||
encodeBackSlash = uint(mask)&EncodeBackSlash != 0
|
||||
encodeHashPercent = uint(mask)&EncodeHashPercent != 0
|
||||
encodeDel = uint(mask)&EncodeDel != 0
|
||||
encodeCtl = uint(mask)&EncodeCtl != 0
|
||||
encodeLeftSpace = uint(mask)&EncodeLeftSpace != 0
|
||||
encodeLeftTilde = uint(mask)&EncodeLeftTilde != 0
|
||||
encodeRightSpace = uint(mask)&EncodeRightSpace != 0
|
||||
encodeRightPeriod = uint(mask)&EncodeRightPeriod != 0
|
||||
encodeInvalidUnicode = uint(mask)&EncodeInvalidUtf8 != 0
|
||||
)
|
||||
|
||||
// handle prefix only replacements
|
||||
prefix := ""
|
||||
if r, l1 := utf8.DecodeRuneInString(in); encodeLeftSpace && r == '␠' { // SYMBOL FOR SPACE
|
||||
prefix, in = " ", in[l1:]
|
||||
} else if encodeLeftTilde && r == '～' { // FULLWIDTH TILDE
|
||||
prefix, in = "~", in[l1:]
|
||||
} else if r == QuoteRune {
|
||||
if r, l2 := utf8.DecodeRuneInString(in[l1:]); encodeLeftSpace && r == '␠' { // SYMBOL FOR SPACE
|
||||
prefix, in = "␠", in[l1+l2:]
|
||||
} else if encodeLeftTilde && r == '～' { // FULLWIDTH TILDE
|
||||
prefix, in = "~", in[l1+l2:]
|
||||
}
|
||||
}
|
||||
|
||||
// handle suffix only replacements
|
||||
suffix := ""
|
||||
if r, l := utf8.DecodeLastRuneInString(in); encodeRightSpace && r == '␠' { // SYMBOL FOR SPACE
|
||||
in = in[:len(in)-l]
|
||||
if r, l2 := utf8.DecodeLastRuneInString(in); r == QuoteRune {
|
||||
suffix, in = "␠", in[:len(in)-l2]
|
||||
} else {
|
||||
suffix = " "
|
||||
}
|
||||
} else if encodeRightPeriod && r == '．' { // FULLWIDTH FULL STOP
|
||||
in = in[:len(in)-l]
|
||||
if r, l2 := utf8.DecodeLastRuneInString(in); r == QuoteRune {
|
||||
suffix, in = ".", in[:len(in)-l2]
|
||||
} else {
|
||||
suffix = "."
|
||||
}
|
||||
}
|
||||
index := 0
|
||||
if prefix == "" && suffix == "" {
|
||||
// find the first rune which (most likely) needs to be replaced
|
||||
index = strings.IndexFunc(in, func(r rune) bool {
|
||||
switch r {
|
||||
case '␀', QuoteRune:
|
||||
return true
|
||||
}
|
||||
if encodeWin { // :?"*<>|
|
||||
switch r {
|
||||
case '*', '<', '>', '?', ':', '|', '"':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if encodeSlash { // /
|
||||
switch r {
|
||||
case '／':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if encodeBackSlash { // \
|
||||
switch r {
|
||||
case '＼':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if encodeHashPercent { // #%
|
||||
switch r {
|
||||
case '＃', '％':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if encodeDel { // DEL(0x7F)
|
||||
switch r {
|
||||
case '␡':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if encodeCtl { // CTRL(0x01-0x1F)
|
||||
if r > symbolOffset && r <= symbolOffset+0x1F {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
})
|
||||
}
|
||||
// nothing to replace, return input
|
||||
if index == -1 {
|
||||
return in
|
||||
}
|
||||
|
||||
var out bytes.Buffer
|
||||
out.Grow(len(in))
|
||||
out.WriteString(prefix)
|
||||
// copy the clean part of the input and skip it
|
||||
out.WriteString(in[:index])
|
||||
in = in[index:]
|
||||
var unquote, unquoteNext, skipNext bool
|
||||
|
||||
for i, r := range in {
|
||||
if skipNext {
|
||||
skipNext = false
|
||||
continue
|
||||
}
|
||||
unquote, unquoteNext = unquoteNext, false
|
||||
switch r {
|
||||
case '␀': // SYMBOL FOR NULL
|
||||
if unquote {
|
||||
out.WriteRune(r)
|
||||
} else {
|
||||
out.WriteRune(0)
|
||||
}
|
||||
continue
|
||||
case QuoteRune:
|
||||
if unquote {
|
||||
out.WriteRune(r)
|
||||
} else {
|
||||
unquoteNext = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
if encodeWin { // :?"*<>|
|
||||
switch r {
|
||||
case '*', '<', '>', '?', ':', '|', '"':
|
||||
if unquote {
|
||||
out.WriteRune(r)
|
||||
} else {
|
||||
out.WriteRune(r - fullOffset)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
if encodeSlash { // /
|
||||
switch r {
|
||||
case '／': // FULLWIDTH SOLIDUS
|
||||
if unquote {
|
||||
out.WriteRune(r)
|
||||
} else {
|
||||
out.WriteRune(r - fullOffset)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
if encodeBackSlash { // \
|
||||
switch r {
|
||||
case '＼': // FULLWIDTH REVERSE SOLIDUS
|
||||
if unquote {
|
||||
out.WriteRune(r)
|
||||
} else {
|
||||
out.WriteRune(r - fullOffset)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
if encodeHashPercent { // #%
|
||||
switch r {
|
||||
case '＃', '％':
|
||||
if unquote {
|
||||
out.WriteRune(r)
|
||||
} else {
|
||||
out.WriteRune(r - fullOffset)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
if encodeDel { // DEL(0x7F)
|
||||
switch r {
|
||||
case '␡': // SYMBOL FOR DELETE
|
||||
if unquote {
|
||||
out.WriteRune(r)
|
||||
} else {
|
||||
out.WriteRune(0x7F)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
if encodeCtl { // CTRL(0x01-0x1F)
|
||||
if r > symbolOffset && r <= symbolOffset+0x1F {
|
||||
if unquote {
|
||||
out.WriteRune(r)
|
||||
} else {
|
||||
out.WriteRune(r - symbolOffset)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
if unquote {
|
||||
if encodeInvalidUnicode {
|
||||
skipNext = appendUnquotedByte(&out, in[i:])
|
||||
if skipNext {
|
||||
continue
|
||||
}
|
||||
}
|
||||
out.WriteRune(QuoteRune)
|
||||
}
|
||||
switch r {
|
||||
case utf8.RuneError:
|
||||
// append the real bytes instead of utf8.RuneError
|
||||
_, l := utf8.DecodeRuneInString(in[i:])
|
||||
out.WriteString(in[i : i+l])
|
||||
continue
|
||||
}
|
||||
|
||||
out.WriteRune(r)
|
||||
}
|
||||
if unquoteNext {
|
||||
out.WriteRune(QuoteRune)
|
||||
}
|
||||
out.WriteString(suffix)
|
||||
return out.String()
|
||||
}
|
||||
|
||||
// FromStandardPath takes a / separated path in Standard encoding
|
||||
// and converts it to a / separated path in this encoding.
|
||||
func (mask MultiEncoder) FromStandardPath(s string) string {
|
||||
return FromStandardPath(mask, s)
|
||||
}
|
||||
|
||||
// FromStandardName takes name in Standard encoding and converts
|
||||
// it in this encoding.
|
||||
func (mask MultiEncoder) FromStandardName(s string) string {
|
||||
return FromStandardName(mask, s)
|
||||
}
|
||||
|
||||
// ToStandardPath takes a / separated path in this encoding
|
||||
// and converts it to a / separated path in Standard encoding.
|
||||
func (mask MultiEncoder) ToStandardPath(s string) string {
|
||||
return ToStandardPath(mask, s)
|
||||
}
|
||||
|
||||
// ToStandardName takes name in this encoding and converts
|
||||
// it in Standard encoding.
|
||||
func (mask MultiEncoder) ToStandardName(s string) string {
|
||||
return ToStandardName(mask, s)
|
||||
}
|
||||
|
||||
func appendQuotedBytes(w io.Writer, s string) {
|
||||
for _, b := range []byte(s) {
|
||||
_, _ = fmt.Fprintf(w, string(QuoteRune)+"%02X", b)
|
||||
}
|
||||
}
|
||||
func appendUnquotedByte(w io.Writer, s string) bool {
|
||||
if len(s) < 2 {
|
||||
return false
|
||||
}
|
||||
u, err := strconv.ParseUint(s[:2], 16, 8)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
n, _ := w.Write([]byte{byte(u)})
|
||||
return n == 1
|
||||
}
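For reference, the helpers above spell an arbitrary byte as the quote rune followed by two upper-case hex digits. A minimal standalone sketch of the same round trip, using only the standard library (quoteBytes and unquoteByte are names invented for this example, not part of the package):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

const quoteRune = '‛' // same rune the encoder above uses for quoting

// quoteBytes renders each byte as ‛XX, mirroring appendQuotedBytes above.
func quoteBytes(s string) string {
	var b strings.Builder
	for _, c := range []byte(s) {
		fmt.Fprintf(&b, string(quoteRune)+"%02X", c)
	}
	return b.String()
}

// unquoteByte parses the two hex digits that follow a quote rune,
// mirroring appendUnquotedByte above.
func unquoteByte(s string) (byte, bool) {
	if len(s) < 2 {
		return 0, false
	}
	u, err := strconv.ParseUint(s[:2], 16, 8)
	return byte(u), err == nil
}

func main() {
	q := quoteBytes("\xBF\xFE")
	fmt.Println(q) // ‛BF‛FE
	b, ok := unquoteByte("BF")
	fmt.Println(b, ok) // 191 true
}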
|
||||
|
||||
type identity struct{}
|
||||
|
||||
func (identity) Encode(in string) string { return in }
|
||||
func (identity) Decode(in string) string { return in }
|
||||
|
||||
func (i identity) FromStandardPath(s string) string {
|
||||
return FromStandardPath(i, s)
|
||||
}
|
||||
func (i identity) FromStandardName(s string) string {
|
||||
return FromStandardName(i, s)
|
||||
}
|
||||
func (i identity) ToStandardPath(s string) string {
|
||||
return ToStandardPath(i, s)
|
||||
}
|
||||
func (i identity) ToStandardName(s string) string {
|
||||
return ToStandardName(i, s)
|
||||
}
|
||||
|
||||
// Identity returns a Encoder that always returns the input value
|
||||
func Identity() Encoder {
|
||||
return identity{}
|
||||
}
|
||||
|
||||
// FromStandardPath takes a / separated path in Standard encoding
|
||||
// and converts it to a / separated path in the given encoding.
|
||||
func FromStandardPath(e Encoder, s string) string {
|
||||
if e == Standard {
|
||||
return s
|
||||
}
|
||||
parts := strings.Split(s, "/")
|
||||
encoded := make([]string, len(parts))
|
||||
changed := false
|
||||
for i, p := range parts {
|
||||
enc := FromStandardName(e, p)
|
||||
changed = changed || enc != p
|
||||
encoded[i] = enc
|
||||
}
|
||||
if !changed {
|
||||
return s
|
||||
}
|
||||
return strings.Join(encoded, "/")
|
||||
}
|
||||
|
||||
// FromStandardName takes name in Standard encoding and converts
|
||||
// it in the given encoding.
|
||||
func FromStandardName(e Encoder, s string) string {
|
||||
if e == Standard {
|
||||
return s
|
||||
}
|
||||
return e.Encode(Standard.Decode(s))
|
||||
}
|
||||
|
||||
// ToStandardPath takes a / separated path in the given encoding
|
||||
// and converts it to a / separated path in Standard encoding.
|
||||
func ToStandardPath(e Encoder, s string) string {
|
||||
if e == Standard {
|
||||
return s
|
||||
}
|
||||
parts := strings.Split(s, "/")
|
||||
encoded := make([]string, len(parts))
|
||||
changed := false
|
||||
for i, p := range parts {
|
||||
dec := ToStandardName(e, p)
|
||||
changed = changed || dec != p
|
||||
encoded[i] = dec
|
||||
}
|
||||
if !changed {
|
||||
return s
|
||||
}
|
||||
return strings.Join(encoded, "/")
|
||||
}
|
||||
|
||||
// ToStandardName takes name in the given encoding and converts
|
||||
// it in Standard encoding.
|
||||
func ToStandardName(e Encoder, s string) string {
|
||||
if e == Standard {
|
||||
return s
|
||||
}
|
||||
return Standard.Encode(e.Decode(s))
|
||||
}
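A short usage sketch of the encoder defined above; the mask below is chosen purely for illustration and is not a recommendation for any particular backend:

package main

import (
	"fmt"

	"github.com/ncw/rclone/lib/encoder"
)

func main() {
	// Illustrative mask: the standard basics plus the Windows-reserved set.
	enc := encoder.MultiEncoder(encoder.EncodeStandard | encoder.EncodeWin)

	name := `report:final?.txt`
	encoded := enc.Encode(name) // reserved runes become their fullwidth forms
	fmt.Println(encoded)
	fmt.Println(enc.Decode(encoded) == name) // true - Decode restores the original

	// The path helpers apply the conversion to each / separated segment.
	fmt.Println(enc.FromStandardPath("dir/with:colon"))
}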
|
||||
2217
lib/encoder/encoder_cases_test.go
Normal file
File diff suppressed because it is too large
262
lib/encoder/encoder_test.go
Normal file
@@ -0,0 +1,262 @@
|
||||
package encoder
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type testCase struct {
|
||||
mask uint
|
||||
in string
|
||||
out string
|
||||
}
|
||||
|
||||
func TestEncodeSingleMask(t *testing.T) {
|
||||
for i, tc := range testCasesSingle {
|
||||
e := MultiEncoder(tc.mask)
|
||||
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
|
||||
got := e.Encode(tc.in)
|
||||
if got != tc.out {
|
||||
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
|
||||
}
|
||||
got2 := e.Decode(got)
|
||||
if got2 != tc.in {
|
||||
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeSingleMaskEdge(t *testing.T) {
|
||||
for i, tc := range testCasesSingleEdge {
|
||||
e := MultiEncoder(tc.mask)
|
||||
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
|
||||
got := e.Encode(tc.in)
|
||||
if got != tc.out {
|
||||
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
|
||||
}
|
||||
got2 := e.Decode(got)
|
||||
if got2 != tc.in {
|
||||
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeInvalidUnicode(t *testing.T) {
|
||||
for i, tc := range []testCase{
|
||||
{
|
||||
mask: EncodeInvalidUtf8,
|
||||
in: "\xBF",
|
||||
out: "‛BF",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8,
|
||||
in: "\xBF\xFE",
|
||||
out: "‛BF‛FE",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8,
|
||||
in: "a\xBF\xFEb",
|
||||
out: "a‛BF‛FEb",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8,
|
||||
in: "a\xBFξ\xFEb",
|
||||
out: "a‛BFξ‛FEb",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8 | EncodeBackSlash,
|
||||
in: "a\xBF\\\xFEb",
|
||||
out: "a‛BF\‛FEb",
|
||||
}, {
|
||||
mask: 0,
|
||||
in: "\xBF",
|
||||
out: "\xBF",
|
||||
}, {
|
||||
mask: 0,
|
||||
in: "\xBF\xFE",
|
||||
out: "\xBF\xFE",
|
||||
}, {
|
||||
mask: 0,
|
||||
in: "a\xBF\xFEb",
|
||||
out: "a\xBF\xFEb",
|
||||
}, {
|
||||
mask: 0,
|
||||
in: "a\xBFξ\xFEb",
|
||||
out: "a\xBFξ\xFEb",
|
||||
}, {
|
||||
mask: EncodeBackSlash,
|
||||
in: "a\xBF\\\xFEb",
|
||||
out: "a\xBF\\xFEb",
|
||||
},
|
||||
} {
|
||||
e := MultiEncoder(tc.mask)
|
||||
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
|
||||
got := e.Encode(tc.in)
|
||||
if got != tc.out {
|
||||
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
|
||||
}
|
||||
got2 := e.Decode(got)
|
||||
if got2 != tc.in {
|
||||
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
func TestDecodeHalf(t *testing.T) {
|
||||
for i, tc := range []testCase{
|
||||
{
|
||||
mask: 0,
|
||||
in: "‛",
|
||||
out: "‛",
|
||||
}, {
|
||||
mask: 0,
|
||||
in: "‛‛",
|
||||
out: "‛",
|
||||
}, {
|
||||
mask: 0,
|
||||
in: "‛a‛",
|
||||
out: "‛a‛",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8,
|
||||
in: "a‛B‛Eg",
|
||||
out: "a‛B‛Eg",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8,
|
||||
in: "a‛B\‛Eg",
|
||||
out: "a‛B\‛Eg",
|
||||
}, {
|
||||
mask: EncodeInvalidUtf8 | EncodeBackSlash,
|
||||
in: "a‛B\‛Eg",
|
||||
out: "a‛B\\‛Eg",
|
||||
},
|
||||
} {
|
||||
e := MultiEncoder(tc.mask)
|
||||
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
|
||||
got := e.Decode(tc.in)
|
||||
if got != tc.out {
|
||||
t.Errorf("Decode(%q) want %q got %q", tc.in, tc.out, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const oneDrive = MultiEncoder(
|
||||
EncodeStandard |
|
||||
EncodeWin |
|
||||
EncodeBackSlash |
|
||||
EncodeHashPercent |
|
||||
EncodeDel |
|
||||
EncodeCtl |
|
||||
EncodeLeftTilde |
|
||||
EncodeRightSpace |
|
||||
EncodeRightPeriod)
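The composed mask above mirrors what a backend would declare for its remote. A hedged sketch of how such a mask is used at the API boundary (the mask and names here are illustrative, not the real OneDrive wiring):

package main

import (
	"fmt"

	"github.com/ncw/rclone/lib/encoder"
)

// exampleEnc is an illustrative mask similar to the oneDrive constant above.
var exampleEnc = encoder.MultiEncoder(
	encoder.EncodeStandard |
		encoder.EncodeWin |
		encoder.EncodeLeftTilde |
		encoder.EncodeRightSpace |
		encoder.EncodeRightPeriod)

func main() {
	local := "~notes. "                          // name as the user sees it
	remote := exampleEnc.FromStandardName(local) // name as sent to the remote API
	fmt.Println(remote)
	fmt.Println(exampleEnc.ToStandardName(remote) == local) // true
}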
|
||||
|
||||
var benchTests = []struct {
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{"", ""},
|
||||
{"abc 123", "abc 123"},
|
||||
{`\*<>?:|#%".~`, `\*<>?:|#%".~`},
|
||||
{`\*<>?:|#%".~/\*<>?:|#%".~`, `\*<>?:|#%".~/\*<>?:|#%".~`},
|
||||
{" leading space", " leading space"},
|
||||
{"~leading tilde", "~leading tilde"},
|
||||
{"trailing dot.", "trailing dot."},
|
||||
{" leading space/ leading space/ leading space", " leading space/ leading space/ leading space"},
|
||||
{"~leading tilde/~leading tilde/~leading tilde", "~leading tilde/~leading tilde/~leading tilde"},
|
||||
{"leading tilde/~leading tilde", "leading tilde/~leading tilde"},
|
||||
{"trailing dot./trailing dot./trailing dot.", "trailing dot./trailing dot./trailing dot."},
|
||||
}
|
||||
|
||||
func benchReplace(b *testing.B, f func(string) string) {
|
||||
for range make([]struct{}, b.N) {
|
||||
for _, test := range benchTests {
|
||||
got := f(test.in)
|
||||
if got != test.out {
|
||||
b.Errorf("Encode(%q) want %q got %q", test.in, test.out, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchRestore(b *testing.B, f func(string) string) {
|
||||
for range make([]struct{}, b.N) {
|
||||
for _, test := range benchTests {
|
||||
got := f(test.out)
|
||||
if got != test.in {
|
||||
b.Errorf("Decode(%q) want %q got %q", got, test.in, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func BenchmarkOneDriveReplaceNew(b *testing.B) {
|
||||
benchReplace(b, oneDrive.Encode)
|
||||
}
|
||||
func BenchmarkOneDriveReplaceOld(b *testing.B) {
|
||||
benchReplace(b, replaceReservedChars)
|
||||
}
|
||||
func BenchmarkOneDriveRestoreNew(b *testing.B) {
|
||||
benchRestore(b, oneDrive.Decode)
|
||||
}
|
||||
func BenchmarkOneDriveRestoreOld(b *testing.B) {
|
||||
benchRestore(b, restoreReservedChars)
|
||||
}
|
||||
|
||||
var (
|
||||
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'*': '＊', // FULLWIDTH ASTERISK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'?': '？', // FULLWIDTH QUESTION MARK
':': '：', // FULLWIDTH COLON
'|': '｜', // FULLWIDTH VERTICAL LINE
'#': '＃', // FULLWIDTH NUMBER SIGN
'%': '％', // FULLWIDTH PERCENT SIGN
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'.': '．', // FULLWIDTH FULL STOP
'~': '～', // FULLWIDTH TILDE
' ': '␠', // SYMBOL FOR SPACE
}
|
||||
invCharMap map[rune]rune
|
||||
fixEndingInPeriod = regexp.MustCompile(`\.(/|$)`)
|
||||
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
|
||||
fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Create inverse charMap
|
||||
invCharMap = make(map[rune]rune, len(charMap))
|
||||
for k, v := range charMap {
|
||||
invCharMap[v] = k
|
||||
}
|
||||
}
|
||||
|
||||
// replaceReservedChars takes a path and substitutes any reserved
|
||||
// characters in it
|
||||
func replaceReservedChars(in string) string {
|
||||
// Folder names can't end with a period '.'
|
||||
in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
|
||||
// OneDrive for Business file or folder names cannot begin with a tilde '~'
|
||||
in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
|
||||
// Apparently file names can't start with space either
|
||||
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
|
||||
// Encode reserved characters
|
||||
return strings.Map(func(c rune) rune {
|
||||
if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
|
||||
return replacement
|
||||
}
|
||||
return c
|
||||
}, in)
|
||||
}
|
||||
|
||||
// restoreReservedChars takes a path and undoes any substitutions
|
||||
// made by replaceReservedChars
|
||||
func restoreReservedChars(in string) string {
|
||||
return strings.Map(func(c rune) rune {
|
||||
if replacement, ok := invCharMap[c]; ok {
|
||||
return replacement
|
||||
}
|
||||
return c
|
||||
}, in)
|
||||
}
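The two regex-based helpers above are kept only as a benchmark baseline for the new encoder. If desired, a small sanity check along these lines could be added to this file (the test name is invented; it assumes the oneDrive mask and helpers defined above):

func TestOldAndNewAgree(t *testing.T) {
	// For a simple name the legacy replacement and the new encoder should agree.
	name := "price: 100%"
	if want, got := replaceReservedChars(name), oneDrive.Encode(name); want != got {
		t.Errorf("legacy %q != encoder %q", want, got)
	}
}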
|
||||
419
lib/encoder/internal/gen/main.go
Normal file
@@ -0,0 +1,419 @@
|
||||
// +build go1.10
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/lib/encoder"
|
||||
)
|
||||
|
||||
const (
|
||||
edgeLeft = iota
|
||||
edgeRight
|
||||
)
|
||||
|
||||
type mapping struct {
|
||||
mask uint
|
||||
src, dst []rune
|
||||
}
|
||||
type stringPair struct {
|
||||
a, b string
|
||||
}
|
||||
|
||||
const header = `// Code generated by ./internal/gen/main.go. DO NOT EDIT.
|
||||
|
||||
` + `//go:generate go run ./internal/gen/main.go
|
||||
|
||||
package encoder
|
||||
|
||||
`
|
||||
|
||||
var maskBits = []struct {
|
||||
mask uint
|
||||
name string
|
||||
}{
|
||||
{encoder.EncodeZero, "EncodeZero"},
|
||||
{encoder.EncodeWin, "EncodeWin"},
|
||||
{encoder.EncodeSlash, "EncodeSlash"},
|
||||
{encoder.EncodeBackSlash, "EncodeBackSlash"},
|
||||
{encoder.EncodeHashPercent, "EncodeHashPercent"},
|
||||
{encoder.EncodeDel, "EncodeDel"},
|
||||
{encoder.EncodeCtl, "EncodeCtl"},
|
||||
{encoder.EncodeLeftSpace, "EncodeLeftSpace"},
|
||||
{encoder.EncodeLeftTilde, "EncodeLeftTilde"},
|
||||
{encoder.EncodeRightSpace, "EncodeRightSpace"},
|
||||
{encoder.EncodeRightPeriod, "EncodeRightPeriod"},
|
||||
{encoder.EncodeInvalidUtf8, "EncodeInvalidUtf8"},
|
||||
}
|
||||
var edges = []struct {
|
||||
mask uint
|
||||
name string
|
||||
edge int
|
||||
orig rune
|
||||
replace rune
|
||||
}{
|
||||
{encoder.EncodeLeftSpace, "EncodeLeftSpace", edgeLeft, ' ', '␠'},
|
||||
{encoder.EncodeLeftTilde, "EncodeLeftTilde", edgeLeft, '~', '~'},
|
||||
{encoder.EncodeRightSpace, "EncodeRightSpace", edgeRight, ' ', '␠'},
|
||||
{encoder.EncodeRightPeriod, "EncodeRightPeriod", edgeRight, '.', '.'},
|
||||
}
|
||||
|
||||
var allMappings = []mapping{{
|
||||
encoder.EncodeZero, []rune{
|
||||
0,
|
||||
}, []rune{
|
||||
'␀',
|
||||
}}, {
|
||||
encoder.EncodeWin, []rune{
|
||||
':', '?', '"', '*', '<', '>', '|',
|
||||
}, []rune{
|
||||
':', '?', '"', '*', '<', '>', '|',
|
||||
}}, {
|
||||
encoder.EncodeSlash, []rune{
|
||||
'/',
|
||||
}, []rune{
|
||||
'／',
|
||||
}}, {
|
||||
encoder.EncodeBackSlash, []rune{
|
||||
'\\',
|
||||
}, []rune{
|
||||
'＼',
|
||||
}}, {
|
||||
encoder.EncodeHashPercent, []rune{
|
||||
'#', '%',
|
||||
}, []rune{
|
||||
'＃', '％',
|
||||
}}, {
|
||||
encoder.EncodeDel, []rune{
|
||||
0x7F,
|
||||
}, []rune{
|
||||
'␡',
|
||||
}}, {
|
||||
encoder.EncodeCtl,
|
||||
runeRange(0x01, 0x1F),
|
||||
runeRange('␁', '␟'),
|
||||
}}
|
||||
|
||||
var (
|
||||
rng = rand.New(rand.NewSource(42))
|
||||
|
||||
printables = runeRange(0x20, 0x7E)
|
||||
fullwidthPrintables = runeRange(0xFF00, 0xFF5E)
|
||||
encodables = collectEncodables(allMappings)
|
||||
encoded = collectEncoded(allMappings)
|
||||
greek = runeRange(0x03B1, 0x03C9)
|
||||
)
|
||||
|
||||
func main() {
|
||||
fd, err := os.Create("encoder_cases_test.go")
|
||||
fatal(err, "Unable to open encoder_cases_test.go:")
|
||||
defer func() {
|
||||
fatal(fd.Close(), "Failed to close encoder_cases_test.go:")
|
||||
}()
|
||||
fatalW(fd.WriteString(header))("Failed to write header:")
|
||||
|
||||
fatalW(fd.WriteString("var testCasesSingle = []testCase{\n\t"))("Write:")
|
||||
_i := 0
|
||||
i := func() (r int) {
|
||||
r, _i = _i, _i+1
|
||||
return
|
||||
}
|
||||
for _, m := range maskBits {
|
||||
if len(getMapping(m.mask).src) == 0 {
|
||||
continue
|
||||
}
|
||||
if _i != 0 {
|
||||
fatalW(fd.WriteString(" "))("Write:")
|
||||
}
|
||||
in, out := buildTestString(
|
||||
[]mapping{getMapping(m.mask)}, // pick
|
||||
[]mapping{getMapping(0)}, // quote
|
||||
printables, fullwidthPrintables, encodables, encoded, greek) // fill
|
||||
fatalW(fmt.Fprintf(fd, `{ // %d
|
||||
mask: %s,
|
||||
in: %s,
|
||||
out: %s,
|
||||
},`, i(), m.name, strconv.Quote(in), strconv.Quote(out)))("Error writing test case:")
|
||||
}
|
||||
fatalW(fd.WriteString(`
|
||||
}
|
||||
|
||||
var testCasesSingleEdge = []testCase{
|
||||
`))("Write:")
|
||||
_i = 0
|
||||
for _, e := range edges {
|
||||
if _i != 0 {
|
||||
fatalW(fd.WriteString(" "))("Write:")
|
||||
}
|
||||
fatalW(fmt.Fprintf(fd, `{ // %d
|
||||
mask: %s,
|
||||
in: %s,
|
||||
out: %s,
|
||||
},`, i(), e.name, strconv.Quote(string(e.orig)), strconv.Quote(string(e.replace))))("Error writing test case:")
|
||||
for _, m := range maskBits {
|
||||
if len(getMapping(m.mask).src) == 0 {
|
||||
continue
|
||||
}
|
||||
pairs := buildEdgeTestString(
|
||||
e.edge, e.orig, e.replace,
|
||||
[]mapping{getMapping(0), getMapping(m.mask)}, // quote
|
||||
printables, fullwidthPrintables, encodables, encoded, greek) // fill
|
||||
for _, p := range pairs {
|
||||
fatalW(fmt.Fprintf(fd, ` { // %d
|
||||
mask: %s | %s,
|
||||
in: %s,
|
||||
out: %s,
|
||||
},`, i(), m.name, e.name, strconv.Quote(p.a), strconv.Quote(p.b)))("Error writing test case:")
|
||||
}
|
||||
}
|
||||
}
|
||||
fatalW(fmt.Fprintf(fd, ` { // %d
mask: EncodeLeftSpace,
in: "  ",
out: "␠ ",
}, { // %d
mask: EncodeLeftTilde,
in: "~~",
out: "～~",
}, { // %d
mask: EncodeRightSpace,
in: "  ",
out: " ␠",
}, { // %d
mask: EncodeRightPeriod,
in: "..",
out: ".．",
}, { // %d
mask: EncodeLeftSpace | EncodeRightPeriod,
in: " .",
out: "␠．",
}, { // %d
mask: EncodeLeftSpace | EncodeRightSpace,
in: " ",
out: "␠",
}, { // %d
mask: EncodeLeftSpace | EncodeRightSpace,
in: "  ",
out: "␠␠",
}, { // %d
mask: EncodeLeftSpace | EncodeRightSpace,
in: "   ",
out: "␠ ␠",
},
}
`, i(), i(), i(), i(), i(), i(), i(), i()))("Error writing test case:")
|
||||
}
|
||||
|
||||
func fatal(err error, s ...interface{}) {
|
||||
if err != nil {
|
||||
log.Fatalln(append(s, err))
|
||||
}
|
||||
}
|
||||
func fatalW(_ int, err error) func(...interface{}) {
|
||||
if err != nil {
|
||||
return func(s ...interface{}) {
|
||||
log.Fatalln(append(s, err))
|
||||
}
|
||||
}
|
||||
return func(s ...interface{}) {}
|
||||
}
|
||||
|
||||
// construct a slice containing the runes between (l)ow (inclusive) and (h)igh (inclusive)
|
||||
func runeRange(l, h rune) []rune {
|
||||
if h < l {
|
||||
panic("invalid range")
|
||||
}
|
||||
out := make([]rune, h-l+1)
|
||||
for i := range out {
|
||||
out[i] = l + rune(i)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func getMapping(mask uint) mapping {
|
||||
for _, m := range allMappings {
|
||||
if m.mask == mask {
|
||||
return m
|
||||
}
|
||||
}
|
||||
return mapping{}
|
||||
}
|
||||
func collectEncodables(m []mapping) (out []rune) {
|
||||
for _, s := range m {
|
||||
for _, r := range s.src {
|
||||
out = append(out, r)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
func collectEncoded(m []mapping) (out []rune) {
|
||||
for _, s := range m {
|
||||
for _, r := range s.dst {
|
||||
out = append(out, r)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func buildTestString(mappings, testMappings []mapping, fill ...[]rune) (string, string) {
|
||||
combinedMappings := append(mappings, testMappings...)
|
||||
var (
|
||||
rIn []rune
|
||||
rOut []rune
|
||||
)
|
||||
for _, m := range mappings {
|
||||
if len(m.src) == 0 || len(m.src) != len(m.dst) {
|
||||
panic("invalid length")
|
||||
}
|
||||
rIn = append(rIn, m.src...)
|
||||
rOut = append(rOut, m.dst...)
|
||||
}
|
||||
inL := len(rIn)
|
||||
testL := inL * 3
|
||||
if testL < 30 {
|
||||
testL = 30
|
||||
}
|
||||
rIn = append(rIn, make([]rune, testL-inL)...)
|
||||
rOut = append(rOut, make([]rune, testL-inL)...)
|
||||
quoteOut := make([]bool, testL)
|
||||
set := func(i int, in, out rune, quote bool) {
|
||||
rIn[i] = in
|
||||
rOut[i] = out
|
||||
quoteOut[i] = quote
|
||||
}
|
||||
for i, r := range rOut[:inL] {
|
||||
set(inL+i, r, r, true)
|
||||
}
|
||||
|
||||
outer:
|
||||
for pos := inL * 2; pos < testL; pos++ {
|
||||
m := pos % len(fill)
|
||||
i := rng.Intn(len(fill[m]))
|
||||
r := fill[m][i]
|
||||
for _, m := range combinedMappings {
|
||||
if pSrc := runePos(r, m.src); pSrc != -1 {
|
||||
set(pos, r, m.dst[pSrc], false)
|
||||
continue outer
|
||||
} else if pDst := runePos(r, m.dst); pDst != -1 {
|
||||
set(pos, r, r, true)
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
set(pos, r, r, false)
|
||||
}
|
||||
|
||||
rng.Shuffle(testL, func(i, j int) {
|
||||
rIn[i], rIn[j] = rIn[j], rIn[i]
|
||||
rOut[i], rOut[j] = rOut[j], rOut[i]
|
||||
quoteOut[i], quoteOut[j] = quoteOut[j], quoteOut[i]
|
||||
})
|
||||
|
||||
var bOut strings.Builder
|
||||
bOut.Grow(testL)
|
||||
for i, r := range rOut {
|
||||
if quoteOut[i] {
|
||||
bOut.WriteRune(encoder.QuoteRune)
|
||||
}
|
||||
bOut.WriteRune(r)
|
||||
}
|
||||
return string(rIn), bOut.String()
|
||||
}
|
||||
|
||||
func buildEdgeTestString(edge int, orig, replace rune, testMappings []mapping, fill ...[]rune) (out []stringPair) {
|
||||
testL := 30
|
||||
rIn := make([]rune, testL)
|
||||
rOut := make([]rune, testL)
|
||||
quoteOut := make([]bool, testL)
|
||||
|
||||
set := func(i int, in, out rune, quote bool) {
|
||||
rIn[i] = in
|
||||
rOut[i] = out
|
||||
quoteOut[i] = quote
|
||||
}
|
||||
|
||||
outer:
|
||||
for pos := 0; pos < testL; pos++ {
|
||||
m := pos % len(fill)
|
||||
i := rng.Intn(len(fill[m]))
|
||||
r := fill[m][i]
|
||||
for _, m := range testMappings {
|
||||
if pSrc := runePos(r, m.src); pSrc != -1 {
|
||||
set(pos, r, m.dst[pSrc], false)
|
||||
continue outer
|
||||
} else if pDst := runePos(r, m.dst); pDst != -1 {
|
||||
set(pos, r, r, true)
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
set(pos, r, r, false)
|
||||
}
|
||||
|
||||
rng.Shuffle(testL, func(i, j int) {
|
||||
rIn[i], rIn[j] = rIn[j], rIn[i]
|
||||
rOut[i], rOut[j] = rOut[j], rOut[i]
|
||||
quoteOut[i], quoteOut[j] = quoteOut[j], quoteOut[i]
|
||||
})
|
||||
set(10, orig, orig, false)
|
||||
|
||||
out = append(out, stringPair{string(rIn), quotedToString(rOut, quoteOut)})
|
||||
for _, i := range []int{0, 1, testL - 2, testL - 1} {
|
||||
for _, j := range []int{1, testL - 2, testL - 1} {
|
||||
if j < i {
|
||||
continue
|
||||
}
|
||||
rIn := append([]rune{}, rIn...)
|
||||
rOut := append([]rune{}, rOut...)
|
||||
quoteOut := append([]bool{}, quoteOut...)
|
||||
|
||||
for _, in := range []rune{orig, replace} {
|
||||
expect, quote := in, false
|
||||
if i == 0 && edge == edgeLeft ||
|
||||
i == testL-1 && edge == edgeRight {
|
||||
expect, quote = replace, in == replace
|
||||
}
|
||||
rIn[i], rOut[i], quoteOut[i] = in, expect, quote
|
||||
|
||||
if i != j {
|
||||
for _, in := range []rune{orig, replace} {
|
||||
expect, quote = in, false
|
||||
if j == testL-1 && edge == edgeRight {
|
||||
expect, quote = replace, in == replace
|
||||
}
|
||||
rIn[j], rOut[j], quoteOut[j] = in, expect, quote
|
||||
}
|
||||
}
|
||||
out = append(out, stringPair{string(rIn), quotedToString(rOut, quoteOut)})
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func runePos(r rune, s []rune) int {
|
||||
for i, c := range s {
|
||||
if c == r {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// quotedToString returns a string for the chars slice where a encoder.QuoteRune is
|
||||
// inserted before a char[i] when quoted[i] is true.
|
||||
func quotedToString(chars []rune, quoted []bool) string {
|
||||
var out strings.Builder
|
||||
out.Grow(len(chars))
|
||||
for i, r := range chars {
|
||||
if quoted[i] {
|
||||
out.WriteRune(encoder.QuoteRune)
|
||||
}
|
||||
out.WriteRune(r)
|
||||
}
|
||||
return out.String()
|
||||
}
|
||||
24
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
@@ -137,7 +137,7 @@ func testOnGCE() bool {
|
||||
resc := make(chan bool, 2)
|
||||
|
||||
// Try two strategies in parallel.
|
||||
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
|
||||
// See https://github.com/googleapis/google-cloud-go/issues/194
|
||||
go func() {
|
||||
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
@@ -300,8 +300,8 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
||||
// being stable anyway.
|
||||
host = metadataIP
|
||||
}
|
||||
url := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||
req, _ := http.NewRequest("GET", url, nil)
|
||||
u := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||
req, _ := http.NewRequest("GET", u, nil)
|
||||
req.Header.Set("Metadata-Flavor", "Google")
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
res, err := c.hc.Do(req)
|
||||
@@ -312,13 +312,13 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return "", "", NotDefinedError(suffix)
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
|
||||
}
|
||||
all, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
return "", "", &Error{Code: res.StatusCode, Message: string(all)}
|
||||
}
|
||||
return string(all), res.Header.Get("Etag"), nil
|
||||
}
|
||||
|
||||
@@ -499,3 +499,15 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Error contains an error response from the server.
|
||||
type Error struct {
|
||||
// Code is the HTTP response status code.
|
||||
Code int
|
||||
// Message is the server response message.
|
||||
Message string
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
|
||||
}
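The Error type added above lets callers distinguish metadata-server failures from transport errors. A hedged sketch of checking for it (the metadata path is just an example):

package main

import (
	"fmt"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	v, err := metadata.Get("instance/hostname") // illustrative metadata key
	if err != nil {
		if e, ok := err.(*metadata.Error); ok {
			fmt.Println("metadata server returned", e.Code, e.Message)
			return
		}
		fmt.Println("other error:", err)
		return
	}
	fmt.Println(v)
}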
|
||||
|
||||
9
vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
generated
vendored
@@ -79,8 +79,13 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share
|
||||
ipRange: v.IPRange,
|
||||
|
||||
// Container/Blob-specific SAS parameters
|
||||
resource: resource,
|
||||
identifier: v.Identifier,
|
||||
resource: resource,
|
||||
identifier: v.Identifier,
|
||||
cacheControl: v.CacheControl,
|
||||
contentDisposition: v.ContentDisposition,
|
||||
contentEncoding: v.ContentEncoding,
|
||||
contentLanguage: v.ContentLanguage,
|
||||
contentType: v.ContentType,
|
||||
|
||||
// Calculated SAS signature
|
||||
signature: signature,
|
||||
|
||||
4
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go
generated
vendored
@@ -229,7 +229,7 @@ func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedI
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
|
||||
func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
|
||||
prefix, include, maxResults := o.pointers()
|
||||
return c.client.ListBlobFlatSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
|
||||
return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
|
||||
}
|
||||
|
||||
// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
|
||||
@@ -242,7 +242,7 @@ func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Mark
|
||||
return nil, errors.New("snapshots are not supported in this listing operation")
|
||||
}
|
||||
prefix, include, maxResults := o.pointers()
|
||||
return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.val, maxResults, include, nil, nil)
|
||||
return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil)
|
||||
}
|
||||
|
||||
// ListBlobsSegmentOptions defines options available when calling ListBlobs.
|
||||
|
||||
2
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go
generated
vendored
@@ -80,7 +80,7 @@ func appendToURLPath(u url.URL, name string) url.URL {
|
||||
// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
|
||||
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
|
||||
prefix, include, maxResults := o.pointers()
|
||||
return s.client.ListContainersSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
|
||||
return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
|
||||
}
|
||||
|
||||
// ListContainersOptions defines options available when calling ListContainers.
|
||||
|
||||
2
vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
generated
vendored
@@ -1,3 +1,3 @@
|
||||
package azblob
|
||||
|
||||
const serviceLibVersion = "0.3"
|
||||
const serviceLibVersion = "0.5"
|
||||
|
||||
8
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
generated
vendored
@@ -109,8 +109,8 @@ func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
|
||||
})
|
||||
}
|
||||
|
||||
// redactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret.
|
||||
func redactSigQueryParam(rawQuery string) (bool, string) {
|
||||
// RedactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret.
|
||||
func RedactSigQueryParam(rawQuery string) (bool, string) {
|
||||
rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
|
||||
sigFound := strings.Contains(rawQuery, "?sig=")
|
||||
if !sigFound {
|
||||
@@ -131,7 +131,7 @@ func redactSigQueryParam(rawQuery string) (bool, string) {
|
||||
|
||||
func prepareRequestForLogging(request pipeline.Request) *http.Request {
|
||||
req := request
|
||||
if sigFound, rawQuery := redactSigQueryParam(req.URL.RawQuery); sigFound {
|
||||
if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound {
|
||||
// Make copy so we don't destroy the query parameters we actually need to send in the request
|
||||
req = request.Copy()
|
||||
req.Request.URL.RawQuery = rawQuery
|
||||
@@ -161,7 +161,7 @@ func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
|
||||
req = request.Copy()
|
||||
url, err := url.Parse(req.Header.Get(key))
|
||||
if err == nil {
|
||||
if sigFound, rawQuery := redactSigQueryParam(url.RawQuery); sigFound {
|
||||
if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound {
|
||||
url.RawQuery = rawQuery
|
||||
req.Header.Set(xMsCopySourceHeader, url.String())
|
||||
}
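With redactSigQueryParam exported as RedactSigQueryParam in this vendored copy, callers outside the package can reuse it; a small sketch (the query string is fake, and the exact redacted form depends on this library version):

package main

import (
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	raw := "se=2019-01-01&sp=r&sig=FAKESIGNATURE"
	found, redacted := azblob.RedactSigQueryParam(raw)
	fmt.Println(found)    // true - a sig parameter was present
	fmt.Println(redacted) // same query with the sig value masked
}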
|
||||
|
||||
89
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go
generated
vendored
@@ -5,6 +5,8 @@ import (
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const CountToEnd = 0
|
||||
@@ -28,6 +30,9 @@ type HTTPGetterInfo struct {
|
||||
ETag ETag
|
||||
}
|
||||
|
||||
// FailedReadNotifier is a function type that represents the notification function called when a read fails
|
||||
type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)
|
||||
|
||||
// RetryReaderOptions contains properties which can help to decide when to do retry.
|
||||
type RetryReaderOptions struct {
|
||||
// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
|
||||
@@ -36,6 +41,20 @@ type RetryReaderOptions struct {
|
||||
MaxRetryRequests int
|
||||
doInjectError bool
|
||||
doInjectErrorRound int
|
||||
|
||||
// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
|
||||
NotifyFailedRead FailedReadNotifier
|
||||
|
||||
// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
|
||||
// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
|
||||
// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
|
||||
// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
|
||||
// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
|
||||
// treated as a fatal (non-retryable) error.
|
||||
// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
|
||||
// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
|
||||
// which will be retried.
|
||||
TreatEarlyCloseAsError bool
|
||||
}
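A hedged sketch of filling in the new options when reading a download body; only the option values are shown, the blob download itself is omitted:

package main

import (
	"log"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	opts := azblob.RetryReaderOptions{
		MaxRetryRequests: 3,
		// Log every failed read; willRetry tells us whether another attempt follows.
		NotifyFailedRead: func(failureCount int, lastError error, offset int64, count int64, willRetry bool) {
			log.Printf("read failed (attempt %d) at offset %d: %v (retrying: %v)",
				failureCount, offset, lastError, willRetry)
		},
		// Keep the default behaviour: an early Close from another goroutine forces a retry.
		TreatEarlyCloseAsError: false,
	}
	_ = opts // typically passed to DownloadResponse.Body(opts) when reading a blob
}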
|
||||
|
||||
// retryReader implements io.ReaderCloser methods.
|
||||
@@ -45,17 +64,33 @@ type RetryReaderOptions struct {
|
||||
// through reading from the new response.
|
||||
type retryReader struct {
|
||||
ctx context.Context
|
||||
response *http.Response
|
||||
info HTTPGetterInfo
|
||||
countWasBounded bool
|
||||
o RetryReaderOptions
|
||||
getter HTTPGetter
|
||||
|
||||
// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
|
||||
responseMu *sync.Mutex
|
||||
response *http.Response
|
||||
}
|
||||
|
||||
// NewRetryReader creates a retry reader.
|
||||
func NewRetryReader(ctx context.Context, initialResponse *http.Response,
|
||||
info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
|
||||
return &retryReader{ctx: ctx, getter: getter, info: info, countWasBounded: info.Count != CountToEnd, response: initialResponse, o: o}
|
||||
return &retryReader{
|
||||
ctx: ctx,
|
||||
getter: getter,
|
||||
info: info,
|
||||
countWasBounded: info.Count != CountToEnd,
|
||||
response: initialResponse,
|
||||
responseMu: &sync.Mutex{},
|
||||
o: o}
|
||||
}
|
||||
|
||||
func (s *retryReader) setResponse(r *http.Response) {
|
||||
s.responseMu.Lock()
|
||||
defer s.responseMu.Unlock()
|
||||
s.response = r
|
||||
}
|
||||
|
||||
func (s *retryReader) Read(p []byte) (n int, err error) {
|
||||
@@ -66,15 +101,19 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
if s.response == nil { // We don't have a response stream to read from, try to get one.
|
||||
response, err := s.getter(s.ctx, s.info)
|
||||
s.responseMu.Lock()
|
||||
resp := s.response
|
||||
s.responseMu.Unlock()
|
||||
if resp == nil { // We don't have a response stream to read from, try to get one.
|
||||
newResponse, err := s.getter(s.ctx, s.info)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// Successful GET; this is the network stream we'll read from.
|
||||
s.response = response
|
||||
s.setResponse(newResponse)
|
||||
resp = newResponse
|
||||
}
|
||||
n, err := s.response.Body.Read(p) // Read from the stream
|
||||
n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)
|
||||
|
||||
// Injection mechanism for testing.
|
||||
if s.o.doInjectError && try == s.o.doInjectErrorRound {
|
||||
@@ -89,23 +128,49 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
|
||||
}
|
||||
return n, err // Return the return to the caller
|
||||
}
|
||||
s.Close() // Error, close stream
|
||||
s.response = nil // Our stream is no longer good
|
||||
s.Close() // Error, close stream
|
||||
s.setResponse(nil) // Our stream is no longer good
|
||||
|
||||
// Check the retry count and error code, and decide whether to retry.
|
||||
if try >= s.o.MaxRetryRequests {
|
||||
return n, err // All retries exhausted
|
||||
retriesExhausted := try >= s.o.MaxRetryRequests
|
||||
_, isNetError := err.(net.Error)
|
||||
willRetry := (isNetError || s.wasRetryableEarlyClose(err)) && !retriesExhausted
|
||||
|
||||
// Notify, for logging purposes, of any failures
|
||||
if s.o.NotifyFailedRead != nil {
|
||||
failureCount := try + 1 // because try is zero-based
|
||||
s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
|
||||
}
|
||||
|
||||
if _, ok := err.(net.Error); ok {
|
||||
if willRetry {
|
||||
continue
|
||||
// Loop around and try to get and read from new stream.
|
||||
}
|
||||
return n, err // Not retryable, just return
|
||||
return n, err // Not retryable, or retries exhausted, so just return
|
||||
}
|
||||
}
|
||||
|
||||
// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry
|
||||
// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
|
||||
// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
|
||||
// which is exactly the behaviour we want.
|
||||
// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read)
|
||||
// then there are two different types of error that may happen - either the one one we check for here,
|
||||
// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
|
||||
// to check for one, since the other is a net.Error, which our main Read retry loop is already handing.
|
||||
func (s *retryReader) wasRetryableEarlyClose(err error) bool {
|
||||
if s.o.TreatEarlyCloseAsError {
|
||||
return false // user wants all early closes to be errors, and so not retryable
|
||||
}
|
||||
// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
|
||||
return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
|
||||
}
|
||||
|
||||
const ReadOnClosedBodyMessage = "read on closed response body"
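A minimal sketch of the force-a-retry-by-closing pattern described in the comment above; the reader below is a stand-in, but the same shape applies to the io.ReadCloser returned by NewRetryReader:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"time"
)

// readWithDeadline copies from rc but closes it from another goroutine if the
// read takes too long; with the retry reader above, that close is treated as a
// retryable error rather than a fatal one (unless TreatEarlyCloseAsError is set).
func readWithDeadline(rc io.ReadCloser, d time.Duration) ([]byte, error) {
	timer := time.AfterFunc(d, func() { _ = rc.Close() })
	defer timer.Stop()
	return ioutil.ReadAll(rc)
}

func main() {
	// Stand-in reader so the sketch runs; in real code rc would come from NewRetryReader.
	rc := ioutil.NopCloser(strings.NewReader("example body"))
	b, err := readWithDeadline(rc, time.Second)
	fmt.Println(string(b), err)
}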
|
||||
|
||||
func (s *retryReader) Close() error {
|
||||
s.responseMu.Lock()
|
||||
defer s.responseMu.Unlock()
|
||||
if s.response != nil && s.response.Body != nil {
|
||||
return s.response.Body.Close()
|
||||
}
|
||||
|
||||
72
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
generated
vendored
@@ -47,17 +47,22 @@ const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 860
|
||||
// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
|
||||
type SASQueryParameters struct {
|
||||
// All members are immutable or values so copies of this struct are goroutine-safe.
|
||||
version string `param:"sv"`
|
||||
services string `param:"ss"`
|
||||
resourceTypes string `param:"srt"`
|
||||
protocol SASProtocol `param:"spr"`
|
||||
startTime time.Time `param:"st"`
|
||||
expiryTime time.Time `param:"se"`
|
||||
ipRange IPRange `param:"sip"`
|
||||
identifier string `param:"si"`
|
||||
resource string `param:"sr"`
|
||||
permissions string `param:"sp"`
|
||||
signature string `param:"sig"`
|
||||
version string `param:"sv"`
|
||||
services string `param:"ss"`
|
||||
resourceTypes string `param:"srt"`
|
||||
protocol SASProtocol `param:"spr"`
|
||||
startTime time.Time `param:"st"`
|
||||
expiryTime time.Time `param:"se"`
|
||||
ipRange IPRange `param:"sip"`
|
||||
identifier string `param:"si"`
|
||||
resource string `param:"sr"`
|
||||
permissions string `param:"sp"`
|
||||
signature string `param:"sig"`
|
||||
cacheControl string `param:"rscc"`
|
||||
contentDisposition string `param:"rscd"`
|
||||
contentEncoding string `param:"rsce"`
|
||||
contentLanguage string `param:"rscl"`
|
||||
contentType string `param:"rsct"`
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) Version() string {
|
||||
@@ -99,6 +104,26 @@ func (p *SASQueryParameters) Signature() string {
|
||||
return p.signature
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) CacheControl() string {
|
||||
return p.cacheControl
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) ContentDisposition() string {
|
||||
return p.contentDisposition
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) ContentEncoding() string {
|
||||
return p.contentEncoding
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) ContentLanguage() string {
|
||||
return p.contentLanguage
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) ContentType() string {
|
||||
return p.contentType
|
||||
}
|
||||
|
||||
// IPRange represents a SAS IP range's start IP and (optionally) end IP.
|
||||
type IPRange struct {
|
||||
Start net.IP // Not specified if length = 0
|
||||
@@ -155,6 +180,16 @@ func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool
|
||||
p.permissions = val
|
||||
case "sig":
|
||||
p.signature = val
|
||||
case "rscc":
|
||||
p.cacheControl = val
|
||||
case "rscd":
|
||||
p.contentDisposition = val
|
||||
case "rsce":
|
||||
p.contentEncoding = val
|
||||
case "rscl":
|
||||
p.contentLanguage = val
|
||||
case "rsct":
|
||||
p.contentType = val
|
||||
default:
|
||||
isSASKey = false // We didn't recognize the query parameter
|
||||
}
|
||||
@@ -200,6 +235,21 @@ func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
|
||||
if p.signature != "" {
|
||||
v.Add("sig", p.signature)
|
||||
}
|
||||
if p.cacheControl != "" {
|
||||
v.Add("rscc", p.cacheControl)
|
||||
}
|
||||
if p.contentDisposition != "" {
|
||||
v.Add("rscd", p.contentDisposition)
|
||||
}
|
||||
if p.contentEncoding != "" {
|
||||
v.Add("rsce", p.contentEncoding)
|
||||
}
|
||||
if p.contentLanguage != "" {
|
||||
v.Add("rscl", p.contentLanguage)
|
||||
}
|
||||
if p.contentType != "" {
|
||||
v.Add("rsct", p.contentType)
|
||||
}
|
||||
return v
|
||||
}
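The new rscc/rscd/rsce/rscl/rsct handling means the response-header overrides survive a round trip through a SAS URL; a hedged sketch of reading them back (the URL and its values are fake):

package main

import (
	"fmt"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// A SAS URL carrying the new response-header override parameters.
	raw := "https://myaccount.blob.core.windows.net/c/b?sv=2018-03-28&sig=FAKE&rscc=no-cache&rsct=text%2Fplain"
	u, err := url.Parse(raw)
	if err != nil {
		panic(err)
	}
	parts := azblob.NewBlobURLParts(*u)
	fmt.Println(parts.SAS.CacheControl()) // no-cache
	fmt.Println(parts.SAS.ContentType())  // text/plain
}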
|
||||
|
||||
|
||||
3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go
generated
vendored
@@ -6,13 +6,14 @@ package azblob
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// appendBlobClient is the client for the AppendBlob methods of the Azblob service.
|
||||
|
||||
3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go
generated
vendored
@@ -8,13 +8,14 @@ import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// blockBlobClient is the client for the BlockBlob methods of the Azblob service.
|
||||
|
||||
3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go
generated
vendored
@@ -4,8 +4,9 @@ package azblob
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"net/url"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go
generated
vendored
@@ -7,13 +7,14 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// containerClient is the client for the Container methods of the Azblob service.
|
||||
|
||||
6
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go
generated
vendored
@@ -55,7 +55,7 @@ func (md *Metadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
|
||||
// Marker represents an opaque value used in paged responses.
|
||||
type Marker struct {
|
||||
val *string
|
||||
Val *string
|
||||
}
|
||||
|
||||
// NotDone returns true if the list enumeration should be started or is not yet complete. Specifically, NotDone returns true
|
||||
@@ -63,14 +63,14 @@ type Marker struct {
|
||||
// the service. NotDone also returns true whenever the service returns an interim result portion. NotDone returns false only
|
||||
// after the service has returned the final result portion.
|
||||
func (m Marker) NotDone() bool {
|
||||
return m.val == nil || *m.val != ""
|
||||
return m.Val == nil || *m.Val != ""
|
||||
}
|
||||
|
||||
// UnmarshalXML implements the xml.Unmarshaler interface for Marker.
|
||||
func (m *Marker) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
var out string
|
||||
err := d.DecodeElement(&out, &start)
|
||||
m.val = &out
|
||||
m.Val = &out
|
||||
return err
|
||||
}
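Exporting Marker.Val lets callers outside this package construct or persist a marker; the usual paging loop looks roughly like this (service setup omitted from the sketch):

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// listAllContainers pages through every container; Marker.Val (exported in this
// vendored copy) lets callers persist the marker between runs if they want to.
func listAllContainers(ctx context.Context, svc azblob.ServiceURL) error {
	for marker := (azblob.Marker{}); marker.NotDone(); {
		resp, err := svc.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})
		if err != nil {
			return err
		}
		for _, c := range resp.ContainerItems {
			fmt.Println(c.Name)
		}
		marker = resp.NextMarker
	}
	return nil
}

func main() {} // ServiceURL construction (credentials, pipeline) omitted from this sketch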
|
||||
|
||||
|
||||
3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go
generated
vendored
@@ -7,13 +7,14 @@ import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// pageBlobClient is the client for the PageBlob methods of the Azblob service.
|
||||
|
||||
@@ -7,8 +7,9 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
type responder func(resp pipeline.Response) (result pipeline.Response, err error)
|
||||
|
||||
3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go
generated
vendored
@@ -6,9 +6,10 @@ package azblob
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// if you want to provide custom error handling set this variable to your constructor function
|
||||
|
||||
3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go
generated
vendored
@@ -7,12 +7,13 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// serviceClient is the client for the Service methods of the Azblob service.
|
||||
|
||||
3
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go
generated
vendored
@@ -5,10 +5,11 @@ package azblob
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// Constraint stores constraint name, target field name
|
||||
|
||||
12
vendor/github.com/a8m/tree/.travis.yml
generated
vendored
@@ -1,10 +1,16 @@
|
||||
language: go
|
||||
sudo: false
|
||||
go:
|
||||
- 1.6.4
|
||||
- 1.7.4
|
||||
- 1.8.3
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- tip
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
install:
|
||||
- go get -t -v ./...
|
||||
script:
|
||||
|
||||
17
vendor/github.com/a8m/tree/compileall.sh
generated
vendored
@@ -5,10 +5,23 @@ go tool dist list >/dev/null || {
|
||||
exit 0
|
||||
}
|
||||
|
||||
failures=0
|
||||
while read -r line; do
|
||||
parts=(${line//\// })
|
||||
export GOOS=${parts[0]}
|
||||
export GOARCH=${parts[1]}
|
||||
echo Try GOOS=${GOOS} GOARCH=${GOARCH}
|
||||
go install
|
||||
if go tool compile -V >/dev/null 2>&1 ; then
|
||||
echo Try GOOS=${GOOS} GOARCH=${GOARCH}
|
||||
if ! go install; then
|
||||
echo "*** Failed compiling GOOS=${GOOS} GOARCH=${GOARCH}"
|
||||
failures=$((failures+1))
|
||||
fi
|
||||
else
|
||||
echo Skipping GOOS=${GOOS} GOARCH=${GOARCH} as not supported
|
||||
fi
|
||||
done < <(go tool dist list)
|
||||
|
||||
if [ $failures -ne 0 ]; then
|
||||
echo "*** $failures compile failures"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
25
vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
generated
vendored
@@ -23,28 +23,27 @@ func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
|
||||
case reflect.Struct:
|
||||
buf.WriteString("{\n")
|
||||
|
||||
names := []string{}
|
||||
for i := 0; i < v.Type().NumField(); i++ {
|
||||
name := v.Type().Field(i).Name
|
||||
f := v.Field(i)
|
||||
if name[0:1] == strings.ToLower(name[0:1]) {
|
||||
ft := v.Type().Field(i)
|
||||
fv := v.Field(i)
|
||||
|
||||
if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
|
||||
continue // ignore unexported fields
|
||||
}
|
||||
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
|
||||
if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
|
||||
continue // ignore unset fields
|
||||
}
|
||||
names = append(names, name)
|
||||
}
|
||||
|
||||
for i, n := range names {
|
||||
val := v.FieldByName(n)
|
||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
||||
buf.WriteString(n + ": ")
|
||||
stringValue(val, indent+2, buf)
|
||||
buf.WriteString(ft.Name + ": ")
|
||||
|
||||
if i < len(names)-1 {
|
||||
buf.WriteString(",\n")
|
||||
if tag := ft.Tag.Get("sensitive"); tag == "true" {
|
||||
buf.WriteString("<sensitive>")
|
||||
} else {
|
||||
stringValue(fv, indent+2, buf)
|
||||
}
|
||||
|
||||
buf.WriteString(",\n")
|
||||
}
|
||||
|
||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
||||
|
||||
2
vendor/github.com/aws/aws-sdk-go/aws/client/client.go
generated
vendored
@@ -18,7 +18,7 @@ type Config struct {

	// States that the signing name did not come from a modeled source but
	// was derived based on other data. Used by service client constructors
	// to determine if the signin name can be overriden based on metadata the
	// to determine if the signin name can be overridden based on metadata the
	// service has.
	SigningNameDerived bool
}
18
vendor/github.com/aws/aws-sdk-go/aws/config.go
generated
vendored
@@ -239,6 +239,13 @@ type Config struct {
|
||||
// Key: aws.String("/foo/bar/moo"),
|
||||
// })
|
||||
EnableEndpointDiscovery *bool
|
||||
|
||||
// DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
|
||||
// request endpoint hosts with modeled information.
|
||||
//
|
||||
// Disabling this feature is useful when you want to use local endpoints
|
||||
// for testing that do not support the modeled host prefix pattern.
|
||||
DisableEndpointHostPrefix *bool
|
||||
}
|
||||
|
||||
// NewConfig returns a new Config pointer that can be chained with builder
|
||||
@@ -399,6 +406,13 @@ func (c *Config) WithEndpointDiscovery(t bool) *Config {
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
|
||||
// when making requests.
|
||||
func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
|
||||
c.DisableEndpointHostPrefix = &t
|
||||
return c
|
||||
}
|
||||
|
||||
// MergeIn merges the passed in configs into the existing config object.
|
||||
func (c *Config) MergeIn(cfgs ...*Config) {
|
||||
for _, other := range cfgs {
|
||||
@@ -502,6 +516,10 @@ func mergeInConfig(dst *Config, other *Config) {
|
||||
if other.EnableEndpointDiscovery != nil {
|
||||
dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
|
||||
}
|
||||
|
||||
if other.DisableEndpointHostPrefix != nil {
|
||||
dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
|
||||
}
|
||||
}
|
||||
|
||||
// Copy will return a shallow copy of the Config object. If any additional
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
// +build !go1.9
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
import "time"
|
||||
|
||||
// Context is an copy of the Go v1.7 stdlib's context.Context interface.
|
||||
// It is represented as a SDK interface to enable you to use the "WithContext"
|
||||
@@ -35,37 +35,3 @@ type Context interface {
|
||||
// functions.
|
||||
Value(key interface{}) interface{}
|
||||
}
|
||||
|
||||
// BackgroundContext returns a context that will never be canceled, has no
|
||||
// values, and no deadline. This context is used by the SDK to provide
|
||||
// backwards compatibility with non-context API operations and functionality.
|
||||
//
|
||||
// Go 1.6 and before:
|
||||
// This context function is equivalent to context.Background in the Go stdlib.
|
||||
//
|
||||
// Go 1.7 and later:
|
||||
// The context returned will be the value returned by context.Background()
|
||||
//
|
||||
// See https://golang.org/pkg/context for more information on Contexts.
|
||||
func BackgroundContext() Context {
|
||||
return backgroundCtx
|
||||
}
|
||||
|
||||
// SleepWithContext will wait for the timer duration to expire, or the context
|
||||
// is canceled. Which ever happens first. If the context is canceled the Context's
|
||||
// error will be returned.
|
||||
//
|
||||
// Expects Context to always return a non-nil error if the Done channel is closed.
|
||||
func SleepWithContext(ctx Context, dur time.Duration) error {
|
||||
t := time.NewTimer(dur)
|
||||
defer t.Stop()
|
||||
|
||||
select {
|
||||
case <-t.C:
|
||||
break
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
9
vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go
generated
vendored
@@ -1,9 +0,0 @@
// +build go1.7

package aws

import "context"

var (
	backgroundCtx = context.Background()
)
11
vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
// +build go1.9

package aws

import "context"

// Context is an alias of the Go stdlib's context.Context interface.
// It can be used within the SDK's API operation "WithContext" methods.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context = context.Context
@@ -39,3 +39,18 @@ func (e *emptyCtx) String() string {
|
||||
var (
|
||||
backgroundCtx = new(emptyCtx)
|
||||
)
|
||||
|
||||
// BackgroundContext returns a context that will never be canceled, has no
|
||||
// values, and no deadline. This context is used by the SDK to provide
|
||||
// backwards compatibility with non-context API operations and functionality.
|
||||
//
|
||||
// Go 1.6 and before:
|
||||
// This context function is equivalent to context.Background in the Go stdlib.
|
||||
//
|
||||
// Go 1.7 and later:
|
||||
// The context returned will be the value returned by context.Background()
|
||||
//
|
||||
// See https://golang.org/pkg/context for more information on Contexts.
|
||||
func BackgroundContext() Context {
|
||||
return backgroundCtx
|
||||
}
|
||||
20
vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
// +build go1.7

package aws

import "context"

// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
	return context.Background()
}
24
vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
package aws

import (
	"time"
)

// SleepWithContext will wait for the timer duration to expire, or the context
// is canceled. Which ever happens first. If the context is canceled the Context's
// error will be returned.
//
// Expects Context to always return a non-nil error if the Done channel is closed.
func SleepWithContext(ctx Context, dur time.Duration) error {
	t := time.NewTimer(dur)
	defer t.Stop()

	select {
	case <-t.C:
		break
	case <-ctx.Done():
		return ctx.Err()
	}

	return nil
}
4
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
generated
vendored
@@ -72,9 +72,9 @@ var ValidateReqSigHandler = request.NamedHandler{
		signedTime = r.LastSignedAt
	}

	// 10 minutes to allow for some clock skew/delays in transmission.
	// 5 minutes to allow for some clock skew/delays in transmission.
	// Would be improved with aws/aws-sdk-go#423
	if signedTime.Add(10 * time.Minute).After(time.Now()) {
	if signedTime.Add(5 * time.Minute).After(time.Now()) {
		return
	}
2
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
generated
vendored
@@ -17,7 +17,7 @@ var SDKVersionUserAgentHandler = request.NamedHandler{
}

const execEnvVar = `AWS_EXECUTION_ENV`
const execEnvUAKey = `exec_env`
const execEnvUAKey = `exec-env`

// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
// execution environment to the user agent.
35
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
generated
vendored
@@ -49,6 +49,8 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@@ -97,6 +99,14 @@ type Provider interface {
|
||||
IsExpired() bool
|
||||
}
|
||||
|
||||
// An Expirer is an interface that Providers can implement to expose the expiration
|
||||
// time, if known. If the Provider cannot accurately provide this info,
|
||||
// it should not implement this interface.
|
||||
type Expirer interface {
|
||||
// The time at which the credentials are no longer valid
|
||||
ExpiresAt() time.Time
|
||||
}
|
||||
|
||||
// An ErrorProvider is a stub credentials provider that always returns an error
|
||||
// this is used by the SDK when construction a known provider is not possible
|
||||
// due to an error.
|
||||
@@ -163,6 +173,11 @@ func (e *Expiry) IsExpired() bool {
|
||||
return e.expiration.Before(curTime())
|
||||
}
|
||||
|
||||
// ExpiresAt returns the expiration time of the credential
|
||||
func (e *Expiry) ExpiresAt() time.Time {
|
||||
return e.expiration
|
||||
}
|
||||
|
||||
// A Credentials provides concurrency safe retrieval of AWS credentials Value.
|
||||
// Credentials will cache the credentials value until they expire. Once the value
|
||||
// expires the next Get will attempt to retrieve valid credentials.
|
||||
@@ -255,3 +270,23 @@ func (c *Credentials) IsExpired() bool {
|
||||
func (c *Credentials) isExpired() bool {
|
||||
return c.forceRefresh || c.provider.IsExpired()
|
||||
}
|
||||
|
||||
// ExpiresAt provides access to the functionality of the Expirer interface of
|
||||
// the underlying Provider, if it supports that interface. Otherwise, it returns
|
||||
// an error.
|
||||
func (c *Credentials) ExpiresAt() (time.Time, error) {
|
||||
c.m.RLock()
|
||||
defer c.m.RUnlock()
|
||||
|
||||
expirer, ok := c.provider.(Expirer)
|
||||
if !ok {
|
||||
return time.Time{}, awserr.New("ProviderNotExpirer",
|
||||
fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName),
|
||||
nil)
|
||||
}
|
||||
if c.forceRefresh {
|
||||
// set expiration time to the distant past
|
||||
return time.Time{}, nil
|
||||
}
|
||||
return expirer.ExpiresAt(), nil
|
||||
}
|
||||
|
||||
425
vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
generated
vendored
Normal file
@@ -0,0 +1,425 @@
|
||||
/*
|
||||
Package processcreds is a credential Provider to retrieve `credential_process`
|
||||
credentials.
|
||||
|
||||
WARNING: The following describes a method of sourcing credentials from an external
|
||||
process. This can potentially be dangerous, so proceed with caution. Other
|
||||
credential providers should be preferred if at all possible. If using this
|
||||
option, you should make sure that the config file is as locked down as possible
|
||||
using security best practices for your operating system.
|
||||
|
||||
You can use credentials from a `credential_process` in a variety of ways.
|
||||
|
||||
One way is to setup your shared config file, located in the default
|
||||
location, with the `credential_process` key and the command you want to be
|
||||
called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
|
||||
(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
|
||||
|
||||
[default]
|
||||
credential_process = /command/to/call
|
||||
|
||||
Creating a new session will use the credential process to retrieve credentials.
|
||||
NOTE: If there are credentials in the profile you are using, the credential
|
||||
process will not be used.
|
||||
|
||||
// Initialize a session to load credentials.
|
||||
sess, _ := session.NewSession(&aws.Config{
|
||||
Region: aws.String("us-east-1")},
|
||||
)
|
||||
|
||||
// Create S3 service client to use the credentials.
|
||||
svc := s3.New(sess)
|
||||
|
||||
Another way to use the `credential_process` method is by using
|
||||
`credentials.NewCredentials()` and providing a command to be executed to
|
||||
retrieve credentials:
|
||||
|
||||
// Create credentials using the ProcessProvider.
|
||||
creds := processcreds.NewCredentials("/path/to/command")
|
||||
|
||||
// Create service client value configured for credentials.
|
||||
svc := s3.New(sess, &aws.Config{Credentials: creds})
|
||||
|
||||
You can set a non-default timeout for the `credential_process` with another
|
||||
constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To
|
||||
set a one minute timeout:
|
||||
|
||||
// Create credentials using the ProcessProvider.
|
||||
creds := processcreds.NewCredentialsTimeout(
|
||||
"/path/to/command",
|
||||
time.Duration(500) * time.Millisecond)
|
||||
|
||||
If you need more control, you can set any configurable options in the
|
||||
credentials using one or more option functions. For example, you can set a two
|
||||
minute timeout, a credential duration of 60 minutes, and a maximum stdout
|
||||
buffer size of 2k.
|
||||
|
||||
creds := processcreds.NewCredentials(
|
||||
"/path/to/command",
|
||||
func(opt *ProcessProvider) {
|
||||
opt.Timeout = time.Duration(2) * time.Minute
|
||||
opt.Duration = time.Duration(60) * time.Minute
|
||||
opt.MaxBufSize = 2048
|
||||
})
|
||||
|
||||
You can also use your own `exec.Cmd`:
|
||||
|
||||
// Create an exec.Cmd
|
||||
myCommand := exec.Command("/path/to/command")
|
||||
|
||||
// Create credentials using your exec.Cmd and custom timeout
|
||||
creds := processcreds.NewCredentialsCommand(
|
||||
myCommand,
|
||||
func(opt *processcreds.ProcessProvider) {
|
||||
opt.Timeout = time.Duration(1) * time.Second
|
||||
})
|
||||
*/
|
||||
package processcreds
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
)
|
||||
|
||||
const (
|
||||
// ProviderName is the name this credentials provider will label any
|
||||
// returned credentials Value with.
|
||||
ProviderName = `ProcessProvider`
|
||||
|
||||
// ErrCodeProcessProviderParse error parsing process output
|
||||
ErrCodeProcessProviderParse = "ProcessProviderParseError"
|
||||
|
||||
// ErrCodeProcessProviderVersion version error in output
|
||||
ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
|
||||
|
||||
// ErrCodeProcessProviderRequired required attribute missing in output
|
||||
ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
|
||||
|
||||
// ErrCodeProcessProviderExecution execution of command failed
|
||||
ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
|
||||
|
||||
// errMsgProcessProviderTimeout process took longer than allowed
|
||||
errMsgProcessProviderTimeout = "credential process timed out"
|
||||
|
||||
// errMsgProcessProviderProcess process error
|
||||
errMsgProcessProviderProcess = "error in credential_process"
|
||||
|
||||
// errMsgProcessProviderParse problem parsing output
|
||||
errMsgProcessProviderParse = "parse failed of credential_process output"
|
||||
|
||||
// errMsgProcessProviderVersion version error in output
|
||||
errMsgProcessProviderVersion = "wrong version in process output (not 1)"
|
||||
|
||||
// errMsgProcessProviderMissKey missing access key id in output
|
||||
errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
|
||||
|
||||
// errMsgProcessProviderMissSecret missing secret acess key in output
|
||||
errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
|
||||
|
||||
// errMsgProcessProviderPrepareCmd prepare of command failed
|
||||
errMsgProcessProviderPrepareCmd = "failed to prepare command"
|
||||
|
||||
// errMsgProcessProviderEmptyCmd command must not be empty
|
||||
errMsgProcessProviderEmptyCmd = "command must not be empty"
|
||||
|
||||
// errMsgProcessProviderPipe failed to initialize pipe
|
||||
errMsgProcessProviderPipe = "failed to initialize pipe"
|
||||
|
||||
// DefaultDuration is the default amount of time in minutes that the
|
||||
// credentials will be valid for.
|
||||
DefaultDuration = time.Duration(15) * time.Minute
|
||||
|
||||
// DefaultBufSize limits buffer size from growing to an enormous
|
||||
// amount due to a faulty process.
|
||||
DefaultBufSize = 1024
|
||||
|
||||
// DefaultTimeout default limit on time a process can run.
|
||||
DefaultTimeout = time.Duration(1) * time.Minute
|
||||
)
|
||||
|
||||
// ProcessProvider satisfies the credentials.Provider interface, and is a
|
||||
// client to retrieve credentials from a process.
|
||||
type ProcessProvider struct {
|
||||
staticCreds bool
|
||||
credentials.Expiry
|
||||
originalCommand []string
|
||||
|
||||
// Expiry duration of the credentials. Defaults to 15 minutes if not set.
|
||||
Duration time.Duration
|
||||
|
||||
// ExpiryWindow will allow the credentials to trigger refreshing prior to
|
||||
// the credentials actually expiring. This is beneficial so race conditions
|
||||
// with expiring credentials do not cause request to fail unexpectedly
|
||||
// due to ExpiredTokenException exceptions.
|
||||
//
|
||||
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
|
||||
// 10 seconds before the credentials are actually expired.
|
||||
//
|
||||
// If ExpiryWindow is 0 or less it will be ignored.
|
||||
ExpiryWindow time.Duration
|
||||
|
||||
// A string representing an os command that should return a JSON with
|
||||
// credential information.
|
||||
command *exec.Cmd
|
||||
|
||||
// MaxBufSize limits memory usage from growing to an enormous
|
||||
// amount due to a faulty process.
|
||||
MaxBufSize int
|
||||
|
||||
// Timeout limits the time a process can run.
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
// NewCredentials returns a pointer to a new Credentials object wrapping the
|
||||
// ProcessProvider. The credentials will expire every 15 minutes by default.
|
||||
func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
|
||||
p := &ProcessProvider{
|
||||
command: exec.Command(command),
|
||||
Duration: DefaultDuration,
|
||||
Timeout: DefaultTimeout,
|
||||
MaxBufSize: DefaultBufSize,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// NewCredentialsTimeout returns a pointer to a new Credentials object with
|
||||
// the specified command and timeout, and default duration and max buffer size.
|
||||
func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
|
||||
p := NewCredentials(command, func(opt *ProcessProvider) {
|
||||
opt.Timeout = timeout
|
||||
})
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// NewCredentialsCommand returns a pointer to a new Credentials object with
|
||||
// the specified command, and default timeout, duration and max buffer size.
|
||||
func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
|
||||
p := &ProcessProvider{
|
||||
command: command,
|
||||
Duration: DefaultDuration,
|
||||
Timeout: DefaultTimeout,
|
||||
MaxBufSize: DefaultBufSize,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
type credentialProcessResponse struct {
|
||||
Version int
|
||||
AccessKeyID string `json:"AccessKeyId"`
|
||||
SecretAccessKey string
|
||||
SessionToken string
|
||||
Expiration *time.Time
|
||||
}
|
||||
|
||||
// Retrieve executes the 'credential_process' and returns the credentials.
|
||||
func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
|
||||
out, err := p.executeCredentialProcess()
|
||||
if err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, err
|
||||
}
|
||||
|
||||
// Serialize and validate response
|
||||
resp := &credentialProcessResponse{}
|
||||
if err = json.Unmarshal(out, resp); err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, awserr.New(
|
||||
ErrCodeProcessProviderParse,
|
||||
fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
|
||||
err)
|
||||
}
|
||||
|
||||
if resp.Version != 1 {
|
||||
return credentials.Value{ProviderName: ProviderName}, awserr.New(
|
||||
ErrCodeProcessProviderVersion,
|
||||
errMsgProcessProviderVersion,
|
||||
nil)
|
||||
}
|
||||
|
||||
if len(resp.AccessKeyID) == 0 {
|
||||
return credentials.Value{ProviderName: ProviderName}, awserr.New(
|
||||
ErrCodeProcessProviderRequired,
|
||||
errMsgProcessProviderMissKey,
|
||||
nil)
|
||||
}
|
||||
|
||||
if len(resp.SecretAccessKey) == 0 {
|
||||
return credentials.Value{ProviderName: ProviderName}, awserr.New(
|
||||
ErrCodeProcessProviderRequired,
|
||||
errMsgProcessProviderMissSecret,
|
||||
nil)
|
||||
}
|
||||
|
||||
// Handle expiration
|
||||
p.staticCreds = resp.Expiration == nil
|
||||
if resp.Expiration != nil {
|
||||
p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
|
||||
}
|
||||
|
||||
return credentials.Value{
|
||||
ProviderName: ProviderName,
|
||||
AccessKeyID: resp.AccessKeyID,
|
||||
SecretAccessKey: resp.SecretAccessKey,
|
||||
SessionToken: resp.SessionToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsExpired returns true if the credentials retrieved are expired, or not yet
|
||||
// retrieved.
|
||||
func (p *ProcessProvider) IsExpired() bool {
|
||||
if p.staticCreds {
|
||||
return false
|
||||
}
|
||||
return p.Expiry.IsExpired()
|
||||
}
|
||||
|
||||
// prepareCommand prepares the command to be executed.
|
||||
func (p *ProcessProvider) prepareCommand() error {
|
||||
|
||||
var cmdArgs []string
|
||||
if runtime.GOOS == "windows" {
|
||||
cmdArgs = []string{"cmd.exe", "/C"}
|
||||
} else {
|
||||
cmdArgs = []string{"sh", "-c"}
|
||||
}
|
||||
|
||||
if len(p.originalCommand) == 0 {
|
||||
p.originalCommand = make([]string, len(p.command.Args))
|
||||
copy(p.originalCommand, p.command.Args)
|
||||
|
||||
// check for empty command because it succeeds
|
||||
if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
|
||||
return awserr.New(
|
||||
ErrCodeProcessProviderExecution,
|
||||
fmt.Sprintf(
|
||||
"%s: %s",
|
||||
errMsgProcessProviderPrepareCmd,
|
||||
errMsgProcessProviderEmptyCmd),
|
||||
nil)
|
||||
}
|
||||
}
|
||||
|
||||
cmdArgs = append(cmdArgs, p.originalCommand...)
|
||||
p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
|
||||
p.command.Env = os.Environ()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// executeCredentialProcess starts the credential process on the OS and
|
||||
// returns the results or an error.
|
||||
func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
|
||||
|
||||
if err := p.prepareCommand(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Setup the pipes
|
||||
outReadPipe, outWritePipe, err := os.Pipe()
|
||||
if err != nil {
|
||||
return nil, awserr.New(
|
||||
ErrCodeProcessProviderExecution,
|
||||
errMsgProcessProviderPipe,
|
||||
err)
|
||||
}
|
||||
|
||||
p.command.Stderr = os.Stderr // display stderr on console for MFA
|
||||
p.command.Stdout = outWritePipe // get creds json on process's stdout
|
||||
p.command.Stdin = os.Stdin // enable stdin for MFA
|
||||
|
||||
output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
|
||||
|
||||
stdoutCh := make(chan error, 1)
|
||||
go readInput(
|
||||
io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
|
||||
output,
|
||||
stdoutCh)
|
||||
|
||||
execCh := make(chan error, 1)
|
||||
go executeCommand(*p.command, execCh)
|
||||
|
||||
finished := false
|
||||
var errors []error
|
||||
for !finished {
|
||||
select {
|
||||
case readError := <-stdoutCh:
|
||||
errors = appendError(errors, readError)
|
||||
finished = true
|
||||
case execError := <-execCh:
|
||||
err := outWritePipe.Close()
|
||||
errors = appendError(errors, err)
|
||||
errors = appendError(errors, execError)
|
||||
if errors != nil {
|
||||
return output.Bytes(), awserr.NewBatchError(
|
||||
ErrCodeProcessProviderExecution,
|
||||
errMsgProcessProviderProcess,
|
||||
errors)
|
||||
}
|
||||
case <-time.After(p.Timeout):
|
||||
finished = true
|
||||
return output.Bytes(), awserr.NewBatchError(
|
||||
ErrCodeProcessProviderExecution,
|
||||
errMsgProcessProviderTimeout,
|
||||
errors) // errors can be nil
|
||||
}
|
||||
}
|
||||
|
||||
out := output.Bytes()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
// windows adds slashes to quotes
|
||||
out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// appendError conveniently checks for nil before appending slice
|
||||
func appendError(errors []error, err error) []error {
|
||||
if err != nil {
|
||||
return append(errors, err)
|
||||
}
|
||||
return errors
|
||||
}
|
||||
|
||||
func executeCommand(cmd exec.Cmd, exec chan error) {
|
||||
// Start the command
|
||||
err := cmd.Start()
|
||||
if err == nil {
|
||||
err = cmd.Wait()
|
||||
}
|
||||
|
||||
exec <- err
|
||||
}
|
||||
|
||||
func readInput(r io.Reader, w io.Writer, read chan error) {
|
||||
tee := io.TeeReader(r, w)
|
||||
|
||||
_, err := ioutil.ReadAll(tee)
|
||||
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
|
||||
read <- err // will only arrive here when write end of pipe is closed
|
||||
}
|
||||
56
vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
generated
vendored
@@ -3,6 +3,8 @@ package csm
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
)
|
||||
|
||||
type metricTime time.Time
|
||||
@@ -39,6 +41,12 @@ type metric struct {
|
||||
SDKException *string `json:"SdkException,omitempty"`
|
||||
SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
|
||||
|
||||
FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"`
|
||||
FinalAWSException *string `json:"FinalAwsException,omitempty"`
|
||||
FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
|
||||
FinalSDKException *string `json:"FinalSdkException,omitempty"`
|
||||
FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`
|
||||
|
||||
DestinationIP *string `json:"DestinationIp,omitempty"`
|
||||
ConnectionReused *int `json:"ConnectionReused,omitempty"`
|
||||
|
||||
@@ -51,3 +59,51 @@ type metric struct {
|
||||
|
||||
MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
|
||||
}
|
||||
|
||||
func (m *metric) TruncateFields() {
|
||||
m.ClientID = truncateString(m.ClientID, 255)
|
||||
m.UserAgent = truncateString(m.UserAgent, 256)
|
||||
|
||||
m.AWSException = truncateString(m.AWSException, 128)
|
||||
m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)
|
||||
|
||||
m.SDKException = truncateString(m.SDKException, 128)
|
||||
m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)
|
||||
|
||||
m.FinalAWSException = truncateString(m.FinalAWSException, 128)
|
||||
m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)
|
||||
|
||||
m.FinalSDKException = truncateString(m.FinalSDKException, 128)
|
||||
m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
|
||||
}
|
||||
|
||||
func truncateString(v *string, l int) *string {
|
||||
if v != nil && len(*v) > l {
|
||||
nv := (*v)[:l]
|
||||
return &nv
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
func (m *metric) SetException(e metricException) {
|
||||
switch te := e.(type) {
|
||||
case awsException:
|
||||
m.AWSException = aws.String(te.exception)
|
||||
m.AWSExceptionMessage = aws.String(te.message)
|
||||
case sdkException:
|
||||
m.SDKException = aws.String(te.exception)
|
||||
m.SDKExceptionMessage = aws.String(te.message)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *metric) SetFinalException(e metricException) {
|
||||
switch te := e.(type) {
|
||||
case awsException:
|
||||
m.FinalAWSException = aws.String(te.exception)
|
||||
m.FinalAWSExceptionMessage = aws.String(te.message)
|
||||
case sdkException:
|
||||
m.FinalSDKException = aws.String(te.exception)
|
||||
m.FinalSDKExceptionMessage = aws.String(te.message)
|
||||
}
|
||||
}
|
||||
|
||||
26
vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
package csm
|
||||
|
||||
type metricException interface {
|
||||
Exception() string
|
||||
Message() string
|
||||
}
|
||||
|
||||
type requestException struct {
|
||||
exception string
|
||||
message string
|
||||
}
|
||||
|
||||
func (e requestException) Exception() string {
|
||||
return e.exception
|
||||
}
|
||||
func (e requestException) Message() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
type awsException struct {
|
||||
requestException
|
||||
}
|
||||
|
||||
type sdkException struct {
|
||||
requestException
|
||||
}
|
||||
42
vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
generated
vendored
@@ -82,14 +82,15 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
|
||||
|
||||
if r.Error != nil {
|
||||
if awserr, ok := r.Error.(awserr.Error); ok {
|
||||
setError(&m, awserr)
|
||||
m.SetException(getMetricException(awserr))
|
||||
}
|
||||
}
|
||||
|
||||
m.TruncateFields()
|
||||
rep.metricsCh.Push(m)
|
||||
}
|
||||
|
||||
func setError(m *metric, err awserr.Error) {
|
||||
func getMetricException(err awserr.Error) metricException {
|
||||
msg := err.Error()
|
||||
code := err.Code()
|
||||
|
||||
@@ -97,11 +98,13 @@ func setError(m *metric, err awserr.Error) {
|
||||
case "RequestError",
|
||||
"SerializationError",
|
||||
request.CanceledErrorCode:
|
||||
m.SDKException = &code
|
||||
m.SDKExceptionMessage = &msg
|
||||
return sdkException{
|
||||
requestException{exception: code, message: msg},
|
||||
}
|
||||
default:
|
||||
m.AWSException = &code
|
||||
m.AWSExceptionMessage = &msg
|
||||
return awsException{
|
||||
requestException{exception: code, message: msg},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -116,6 +119,7 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) {
|
||||
API: aws.String(r.Operation.Name),
|
||||
Service: aws.String(r.ClientInfo.ServiceID),
|
||||
Timestamp: (*metricTime)(&now),
|
||||
UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
|
||||
Type: aws.String("ApiCall"),
|
||||
AttemptCount: aws.Int(r.RetryCount + 1),
|
||||
Region: r.Config.Region,
|
||||
@@ -124,6 +128,18 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) {
|
||||
MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
|
||||
}
|
||||
|
||||
if r.HTTPResponse != nil {
|
||||
m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
|
||||
}
|
||||
|
||||
if r.Error != nil {
|
||||
if awserr, ok := r.Error.(awserr.Error); ok {
|
||||
m.SetFinalException(getMetricException(awserr))
|
||||
}
|
||||
}
|
||||
|
||||
m.TruncateFields()
|
||||
|
||||
// TODO: Probably want to figure something out for logging dropped
|
||||
// metrics
|
||||
rep.metricsCh.Push(m)
|
||||
@@ -223,13 +239,15 @@ func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
|
||||
return
|
||||
}
|
||||
|
||||
apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric}
|
||||
apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric}
|
||||
handlers.Complete.PushFrontNamed(request.NamedHandler{
|
||||
Name: APICallMetricHandlerName,
|
||||
Fn: rep.sendAPICallMetric,
|
||||
})
|
||||
|
||||
handlers.Complete.PushFrontNamed(apiCallHandler)
|
||||
handlers.Complete.PushFrontNamed(apiCallAttemptHandler)
|
||||
|
||||
handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
|
||||
handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
|
||||
Name: APICallAttemptMetricHandlerName,
|
||||
Fn: rep.sendAPICallAttemptMetric,
|
||||
})
|
||||
}
|
||||
|
||||
// boolIntValue return 1 for true and 0 for false.
|
||||
|
||||
4
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
generated
vendored
@@ -118,6 +118,10 @@ func (c *EC2Metadata) Region() (string, error) {
		return "", err
	}

	if len(resp) == 0 {
		return "", awserr.New("EC2MetadataError", "invalid Region response", nil)
	}

	// returns region without the suffix. Eg: us-west-2a becomes us-west-2
	return resp[:len(resp)-1], nil
}
2
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
generated
vendored
@@ -4,7 +4,7 @@
// This package's client can be disabled completely by setting the environment
// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
// true instructs the SDK to disable the EC2 Metadata client. The client cannot
// be used while the environemnt variable is set to true, (case insensitive).
// be used while the environment variable is set to true, (case insensitive).
package ec2metadata

import (
28
vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
generated
vendored
@@ -85,6 +85,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
|
||||
custAddS3DualStack(p)
|
||||
custRmIotDataService(p)
|
||||
custFixAppAutoscalingChina(p)
|
||||
custFixAppAutoscalingUsGov(p)
|
||||
}
|
||||
|
||||
return ps, nil
|
||||
@@ -149,6 +150,33 @@ func custFixAppAutoscalingChina(p *partition) {
|
||||
p.Services[serviceName] = s
|
||||
}
|
||||
|
||||
func custFixAppAutoscalingUsGov(p *partition) {
|
||||
if p.ID != "aws-us-gov" {
|
||||
return
|
||||
}
|
||||
|
||||
const serviceName = "application-autoscaling"
|
||||
s, ok := p.Services[serviceName]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if a := s.Defaults.CredentialScope.Service; a != "" {
|
||||
fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
|
||||
return
|
||||
}
|
||||
|
||||
if a := s.Defaults.Hostname; a != "" {
|
||||
fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
|
||||
return
|
||||
}
|
||||
|
||||
s.Defaults.CredentialScope.Service = "application-autoscaling"
|
||||
s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com"
|
||||
|
||||
p.Services[serviceName] = s
|
||||
}
|
||||
|
||||
type decodeModelError struct {
|
||||
awsError
|
||||
}
|
||||
|
||||
595
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
File diff suppressed because it is too large
141
vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
generated
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
package endpoints
|
||||
|
||||
// Service identifiers
|
||||
//
|
||||
// Deprecated: Use client package's EndpointID value instead of these
|
||||
// ServiceIDs. These IDs are not maintained, and are out of date.
|
||||
const (
|
||||
A4bServiceID = "a4b" // A4b.
|
||||
AcmServiceID = "acm" // Acm.
|
||||
AcmPcaServiceID = "acm-pca" // AcmPca.
|
||||
ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
|
||||
ApiPricingServiceID = "api.pricing" // ApiPricing.
|
||||
ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
|
||||
ApigatewayServiceID = "apigateway" // Apigateway.
|
||||
ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
|
||||
Appstream2ServiceID = "appstream2" // Appstream2.
|
||||
AppsyncServiceID = "appsync" // Appsync.
|
||||
AthenaServiceID = "athena" // Athena.
|
||||
AutoscalingServiceID = "autoscaling" // Autoscaling.
|
||||
AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
|
||||
BatchServiceID = "batch" // Batch.
|
||||
BudgetsServiceID = "budgets" // Budgets.
|
||||
CeServiceID = "ce" // Ce.
|
||||
ChimeServiceID = "chime" // Chime.
|
||||
Cloud9ServiceID = "cloud9" // Cloud9.
|
||||
ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
|
||||
CloudformationServiceID = "cloudformation" // Cloudformation.
|
||||
CloudfrontServiceID = "cloudfront" // Cloudfront.
|
||||
CloudhsmServiceID = "cloudhsm" // Cloudhsm.
|
||||
Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
|
||||
CloudsearchServiceID = "cloudsearch" // Cloudsearch.
|
||||
CloudtrailServiceID = "cloudtrail" // Cloudtrail.
|
||||
CodebuildServiceID = "codebuild" // Codebuild.
|
||||
CodecommitServiceID = "codecommit" // Codecommit.
|
||||
CodedeployServiceID = "codedeploy" // Codedeploy.
|
||||
CodepipelineServiceID = "codepipeline" // Codepipeline.
|
||||
CodestarServiceID = "codestar" // Codestar.
|
||||
CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
|
||||
CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
|
||||
CognitoSyncServiceID = "cognito-sync" // CognitoSync.
|
||||
ComprehendServiceID = "comprehend" // Comprehend.
|
||||
ConfigServiceID = "config" // Config.
|
||||
CurServiceID = "cur" // Cur.
|
||||
DatapipelineServiceID = "datapipeline" // Datapipeline.
|
||||
DaxServiceID = "dax" // Dax.
|
||||
DevicefarmServiceID = "devicefarm" // Devicefarm.
|
||||
DirectconnectServiceID = "directconnect" // Directconnect.
|
||||
DiscoveryServiceID = "discovery" // Discovery.
|
||||
DmsServiceID = "dms" // Dms.
|
||||
DsServiceID = "ds" // Ds.
|
||||
DynamodbServiceID = "dynamodb" // Dynamodb.
|
||||
Ec2ServiceID = "ec2" // Ec2.
|
||||
Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
|
||||
EcrServiceID = "ecr" // Ecr.
|
||||
EcsServiceID = "ecs" // Ecs.
|
||||
ElasticacheServiceID = "elasticache" // Elasticache.
|
||||
ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
|
||||
ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
|
||||
ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
|
||||
ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
|
||||
ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
|
||||
EmailServiceID = "email" // Email.
|
||||
EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
|
||||
EsServiceID = "es" // Es.
|
||||
EventsServiceID = "events" // Events.
|
||||
FirehoseServiceID = "firehose" // Firehose.
|
||||
FmsServiceID = "fms" // Fms.
|
||||
GameliftServiceID = "gamelift" // Gamelift.
|
||||
GlacierServiceID = "glacier" // Glacier.
|
||||
GlueServiceID = "glue" // Glue.
|
||||
GreengrassServiceID = "greengrass" // Greengrass.
|
||||
GuarddutyServiceID = "guardduty" // Guardduty.
|
||||
HealthServiceID = "health" // Health.
|
||||
IamServiceID = "iam" // Iam.
|
||||
ImportexportServiceID = "importexport" // Importexport.
|
||||
InspectorServiceID = "inspector" // Inspector.
|
||||
IotServiceID = "iot" // Iot.
|
||||
IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
|
||||
KinesisServiceID = "kinesis" // Kinesis.
|
||||
KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
|
||||
KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
|
||||
KmsServiceID = "kms" // Kms.
|
||||
LambdaServiceID = "lambda" // Lambda.
|
||||
LightsailServiceID = "lightsail" // Lightsail.
|
||||
LogsServiceID = "logs" // Logs.
|
||||
MachinelearningServiceID = "machinelearning" // Machinelearning.
|
||||
MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
|
||||
MediaconvertServiceID = "mediaconvert" // Mediaconvert.
|
||||
MedialiveServiceID = "medialive" // Medialive.
|
||||
MediapackageServiceID = "mediapackage" // Mediapackage.
|
||||
MediastoreServiceID = "mediastore" // Mediastore.
|
||||
MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
|
||||
MghServiceID = "mgh" // Mgh.
|
||||
MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
|
||||
ModelsLexServiceID = "models.lex" // ModelsLex.
|
||||
MonitoringServiceID = "monitoring" // Monitoring.
|
||||
MturkRequesterServiceID = "mturk-requester" // MturkRequester.
|
||||
NeptuneServiceID = "neptune" // Neptune.
|
||||
OpsworksServiceID = "opsworks" // Opsworks.
|
||||
OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
|
||||
OrganizationsServiceID = "organizations" // Organizations.
|
||||
PinpointServiceID = "pinpoint" // Pinpoint.
|
||||
PollyServiceID = "polly" // Polly.
|
||||
RdsServiceID = "rds" // Rds.
|
||||
RedshiftServiceID = "redshift" // Redshift.
|
||||
RekognitionServiceID = "rekognition" // Rekognition.
|
||||
ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
|
||||
Route53ServiceID = "route53" // Route53.
|
||||
Route53domainsServiceID = "route53domains" // Route53domains.
|
||||
RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
|
||||
RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
|
||||
S3ServiceID = "s3" // S3.
|
||||
S3ControlServiceID = "s3-control" // S3Control.
|
||||
SagemakerServiceID = "api.sagemaker" // Sagemaker.
|
||||
SdbServiceID = "sdb" // Sdb.
|
||||
SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
|
||||
ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
|
||||
ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
|
||||
ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
|
||||
ShieldServiceID = "shield" // Shield.
|
||||
SmsServiceID = "sms" // Sms.
|
||||
SnowballServiceID = "snowball" // Snowball.
|
||||
SnsServiceID = "sns" // Sns.
|
||||
SqsServiceID = "sqs" // Sqs.
|
||||
SsmServiceID = "ssm" // Ssm.
|
||||
StatesServiceID = "states" // States.
|
||||
StoragegatewayServiceID = "storagegateway" // Storagegateway.
|
||||
StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
|
||||
StsServiceID = "sts" // Sts.
|
||||
SupportServiceID = "support" // Support.
|
||||
SwfServiceID = "swf" // Swf.
|
||||
TaggingServiceID = "tagging" // Tagging.
|
||||
TransferServiceID = "transfer" // Transfer.
|
||||
TranslateServiceID = "translate" // Translate.
|
||||
WafServiceID = "waf" // Waf.
|
||||
WafRegionalServiceID = "waf-regional" // WafRegional.
|
||||
WorkdocsServiceID = "workdocs" // Workdocs.
|
||||
WorkmailServiceID = "workmail" // Workmail.
|
||||
WorkspacesServiceID = "workspaces" // Workspaces.
|
||||
XrayServiceID = "xray" // Xray.
|
||||
)
|
||||
2
vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
generated
vendored
@@ -35,7 +35,7 @@ type Options struct {
	//
	// If resolving an endpoint on the partition list the provided region will
	// be used to determine which partition's domain name pattern to the service
	// endpoint ID with. If both the service and region are unkonwn and resolving
	// endpoint ID with. If both the service and region are unknown and resolving
	// the endpoint on partition list an UnknownEndpointError error will be returned.
	//
	// If resolving and endpoint on a partition specific resolver that partition's
24
vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
generated
vendored
@@ -16,6 +16,10 @@ import (
|
||||
type CodeGenOptions struct {
|
||||
// Options for how the model will be decoded.
|
||||
DecodeModelOptions DecodeModelOptions
|
||||
|
||||
// Disables code generation of the service endpoint prefix IDs defined in
|
||||
// the model.
|
||||
DisableGenerateServiceIDs bool
|
||||
}
|
||||
|
||||
// Set combines all of the option functions together
|
||||
@@ -39,8 +43,16 @@ func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGe
|
||||
return err
|
||||
}
|
||||
|
||||
v := struct {
|
||||
Resolver
|
||||
CodeGenOptions
|
||||
}{
|
||||
Resolver: resolver,
|
||||
CodeGenOptions: opts,
|
||||
}
|
||||
|
||||
tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
|
||||
if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil {
|
||||
if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil {
|
||||
return fmt.Errorf("failed to execute template, %v", err)
|
||||
}
|
||||
|
||||
@@ -166,15 +178,17 @@ import (
|
||||
"regexp"
|
||||
)
|
||||
|
||||
{{ template "partition consts" . }}
|
||||
{{ template "partition consts" $.Resolver }}
|
||||
|
||||
{{ range $_, $partition := . }}
|
||||
{{ range $_, $partition := $.Resolver }}
|
||||
{{ template "partition region consts" $partition }}
|
||||
{{ end }}
|
||||
|
||||
{{ template "service consts" . }}
|
||||
{{ if not $.DisableGenerateServiceIDs -}}
|
||||
{{ template "service consts" $.Resolver }}
|
||||
{{- end }}
|
||||
|
||||
{{ template "endpoint resolvers" . }}
|
||||
{{ template "endpoint resolvers" $.Resolver }}
|
||||
{{- end }}
|
||||
|
||||
{{ define "partition consts" }}
|
||||
|
||||
3
vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
generated
vendored
@@ -19,6 +19,7 @@ type Handlers struct {
|
||||
UnmarshalError HandlerList
|
||||
Retry HandlerList
|
||||
AfterRetry HandlerList
|
||||
CompleteAttempt HandlerList
|
||||
Complete HandlerList
|
||||
}
|
||||
|
||||
@@ -36,6 +37,7 @@ func (h *Handlers) Copy() Handlers {
|
||||
UnmarshalMeta: h.UnmarshalMeta.copy(),
|
||||
Retry: h.Retry.copy(),
|
||||
AfterRetry: h.AfterRetry.copy(),
|
||||
CompleteAttempt: h.CompleteAttempt.copy(),
|
||||
Complete: h.Complete.copy(),
|
||||
}
|
||||
}
|
||||
@@ -53,6 +55,7 @@ func (h *Handlers) Clear() {
|
||||
h.ValidateResponse.Clear()
|
||||
h.Retry.Clear()
|
||||
h.AfterRetry.Clear()
|
||||
h.CompleteAttempt.Clear()
|
||||
h.Complete.Clear()
|
||||
}
|
||||
|
||||
|
||||
184
vendor/github.com/aws/aws-sdk-go/aws/request/request.go
generated
vendored
@@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
@@ -122,7 +121,6 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
|
||||
Handlers: handlers.Copy(),
|
||||
|
||||
Retryer: retryer,
|
||||
AttemptTime: time.Now(),
|
||||
Time: time.Now(),
|
||||
ExpireTime: 0,
|
||||
Operation: operation,
|
||||
@@ -266,9 +264,9 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) {
|
||||
}
|
||||
|
||||
// Presign returns the request's signed URL. Error will be returned
|
||||
// if the signing fails. The expire parameter is only used for presigned Amazon
|
||||
// S3 API requests. All other AWS services will use a fixed expriation
|
||||
// time of 15 minutes.
|
||||
// if the signing fails. The expire parameter is only used for presigned Amazon
|
||||
// S3 API requests. All other AWS services will use a fixed expiration
|
||||
// time of 15 minutes.
|
||||
//
|
||||
// It is invalid to create a presigned URL with a expire duration 0 or less. An
|
||||
// error is returned if expire duration is 0 or less.
|
||||
@@ -287,7 +285,7 @@ func (r *Request) Presign(expire time.Duration) (string, error) {
|
||||
// PresignRequest behaves just like presign, with the addition of returning a
|
||||
// set of headers that were signed. The expire parameter is only used for
|
||||
// presigned Amazon S3 API requests. All other AWS services will use a fixed
|
||||
// expriation time of 15 minutes.
|
||||
// expiration time of 15 minutes.
|
||||
//
|
||||
// It is invalid to create a presigned URL with a expire duration 0 or less. An
|
||||
// error is returned if expire duration is 0 or less.
|
||||
@@ -466,80 +464,78 @@ func (r *Request) Send() error {
|
||||
r.Handlers.Complete.Run(r)
|
||||
}()
|
||||
|
||||
if err := r.Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
r.Error = nil
|
||||
r.AttemptTime = time.Now()
|
||||
if aws.BoolValue(r.Retryable) {
|
||||
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
|
||||
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
|
||||
}
|
||||
|
||||
// The previous http.Request will have a reference to the r.Body
|
||||
// and the HTTP Client's Transport may still be reading from
|
||||
// the request's body even though the Client's Do returned.
|
||||
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
|
||||
r.ResetBody()
|
||||
|
||||
// Closing response body to ensure that no response body is leaked
|
||||
// between retry attempts.
|
||||
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
|
||||
r.HTTPResponse.Body.Close()
|
||||
}
|
||||
if err := r.Sign(); err != nil {
|
||||
debugLogReqError(r, "Sign Request", false, err)
|
||||
return err
|
||||
}
|
||||
|
||||
r.Sign()
|
||||
if r.Error != nil {
|
||||
return r.Error
|
||||
}
|
||||
|
||||
r.Retryable = nil
|
||||
|
||||
r.Handlers.Send.Run(r)
|
||||
if r.Error != nil {
|
||||
if !shouldRetryCancel(r) {
|
||||
return r.Error
|
||||
}
|
||||
|
||||
err := r.Error
|
||||
if err := r.sendRequest(); err == nil {
|
||||
return nil
|
||||
} else if !shouldRetryCancel(r.Error) {
|
||||
return err
|
||||
} else {
|
||||
r.Handlers.Retry.Run(r)
|
||||
r.Handlers.AfterRetry.Run(r)
|
||||
if r.Error != nil {
|
||||
debugLogReqError(r, "Send Request", false, err)
|
||||
|
||||
if r.Error != nil || !aws.BoolValue(r.Retryable) {
|
||||
return r.Error
|
||||
}
|
||||
debugLogReqError(r, "Send Request", true, err)
|
||||
|
||||
r.prepareRetry()
|
||||
continue
|
||||
}
|
||||
r.Handlers.UnmarshalMeta.Run(r)
|
||||
r.Handlers.ValidateResponse.Run(r)
|
||||
if r.Error != nil {
|
||||
r.Handlers.UnmarshalError.Run(r)
|
||||
err := r.Error
|
||||
}
|
||||
}
|
||||
|
||||
r.Handlers.Retry.Run(r)
|
||||
r.Handlers.AfterRetry.Run(r)
|
||||
if r.Error != nil {
|
||||
debugLogReqError(r, "Validate Response", false, err)
|
||||
return r.Error
|
||||
}
|
||||
debugLogReqError(r, "Validate Response", true, err)
|
||||
continue
|
||||
}
|
||||
func (r *Request) prepareRetry() {
|
||||
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
|
||||
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
|
||||
}
|
||||
|
||||
r.Handlers.Unmarshal.Run(r)
|
||||
if r.Error != nil {
|
||||
err := r.Error
|
||||
r.Handlers.Retry.Run(r)
|
||||
r.Handlers.AfterRetry.Run(r)
|
||||
if r.Error != nil {
|
||||
debugLogReqError(r, "Unmarshal Response", false, err)
|
||||
return r.Error
|
||||
}
|
||||
debugLogReqError(r, "Unmarshal Response", true, err)
|
||||
continue
|
||||
}
|
||||
// The previous http.Request will have a reference to the r.Body
|
||||
// and the HTTP Client's Transport may still be reading from
|
||||
// the request's body even though the Client's Do returned.
|
||||
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
|
||||
r.ResetBody()
|
||||
|
||||
break
|
||||
// Closing response body to ensure that no response body is leaked
|
||||
// between retry attempts.
|
||||
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
|
||||
r.HTTPResponse.Body.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Request) sendRequest() (sendErr error) {
|
||||
defer r.Handlers.CompleteAttempt.Run(r)
|
||||
|
||||
r.Retryable = nil
|
||||
r.Handlers.Send.Run(r)
|
||||
if r.Error != nil {
|
||||
debugLogReqError(r, "Send Request", r.WillRetry(), r.Error)
|
||||
return r.Error
|
||||
}
|
||||
|
||||
r.Handlers.UnmarshalMeta.Run(r)
|
||||
r.Handlers.ValidateResponse.Run(r)
|
||||
if r.Error != nil {
|
||||
r.Handlers.UnmarshalError.Run(r)
|
||||
debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error)
|
||||
return r.Error
|
||||
}
|
||||
|
||||
r.Handlers.Unmarshal.Run(r)
|
||||
if r.Error != nil {
|
||||
debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error)
|
||||
return r.Error
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -565,30 +561,46 @@ func AddToUserAgent(r *Request, s string) {
|
||||
r.HTTPRequest.Header.Set("User-Agent", s)
|
||||
}
|
||||
|
||||
func shouldRetryCancel(r *Request) bool {
|
||||
awsErr, ok := r.Error.(awserr.Error)
|
||||
timeoutErr := false
|
||||
errStr := r.Error.Error()
|
||||
if ok {
|
||||
if awsErr.Code() == CanceledErrorCode {
|
||||
type temporary interface {
|
||||
Temporary() bool
|
||||
}
|
||||
|
||||
func shouldRetryCancel(err error) bool {
|
||||
switch err := err.(type) {
|
||||
case awserr.Error:
|
||||
if err.Code() == CanceledErrorCode {
|
||||
return false
|
||||
}
|
||||
err := awsErr.OrigErr()
|
||||
netErr, netOK := err.(net.Error)
|
||||
timeoutErr = netOK && netErr.Temporary()
|
||||
if urlErr, ok := err.(*url.Error); !timeoutErr && ok {
|
||||
errStr = urlErr.Err.Error()
|
||||
return shouldRetryCancel(err.OrigErr())
|
||||
case *url.Error:
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
// Refused connections should be retried as the service may not yet
|
||||
// be running on the port. Go TCP dial considers refused
|
||||
// connections as not temporary.
|
||||
return true
|
||||
}
|
||||
// *url.Error only implements Temporary after golang 1.6 but since
|
||||
// url.Error only wraps the error:
|
||||
return shouldRetryCancel(err.Err)
|
||||
case temporary:
|
||||
// If the error is temporary, we want to allow continuation of the
|
||||
// retry process
|
||||
return err.Temporary()
|
||||
case nil:
|
||||
// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
|
||||
// because we don't know the cause, it is marked as retriable. See
|
||||
// TestRequest4xxUnretryable for an example.
|
||||
return true
|
||||
default:
|
||||
switch err.Error() {
|
||||
case "net/http: request canceled",
|
||||
"net/http: request canceled while waiting for connection":
|
||||
// known 1.5 error case when an http request is cancelled
|
||||
return false
|
||||
}
|
||||
// here we don't know the error; so we allow a retry.
|
||||
return true
|
||||
}
|
||||
|
||||
// There can be two types of canceled errors here.
|
||||
// The first being a net.Error and the other being an error.
|
||||
// If the request was timed out, we want to continue the retry
|
||||
// process. Otherwise, return the canceled error.
|
||||
return timeoutErr ||
|
||||
(errStr != "net/http: request canceled" &&
|
||||
errStr != "net/http: request canceled while waiting for connection")
|
||||
|
||||
}
|
||||
|
||||
// SanitizeHostForHeader removes default port from host and updates request.Host
|
||||
|
||||
1
vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
generated
vendored
@@ -40,6 +40,7 @@ var throttleCodes = map[string]struct{}{
	"RequestThrottled": {},
	"TooManyRequestsException": {}, // Lambda functions
	"PriorRequestNotComplete": {}, // Route53
	"TransactionInProgressException": {},
}

// credsExpiredCodes is a collection of error codes which signify the credentials
25
vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
generated
vendored
@@ -17,6 +17,8 @@ const (
|
||||
ParamMinValueErrCode = "ParamMinValueError"
|
||||
// ParamMinLenErrCode is the error code for fields without enough elements.
|
||||
ParamMinLenErrCode = "ParamMinLenError"
|
||||
// ParamMaxLenErrCode is the error code for value being too long.
|
||||
ParamMaxLenErrCode = "ParamMaxLenError"
|
||||
|
||||
// ParamFormatErrCode is the error code for a field with invalid
|
||||
// format or characters.
|
||||
@@ -237,6 +239,29 @@ func (e *ErrParamMinLen) MinLen() int {
|
||||
return e.min
|
||||
}
|
||||
|
||||
// An ErrParamMaxLen represents a maximum length parameter error.
|
||||
type ErrParamMaxLen struct {
|
||||
errInvalidParam
|
||||
max int
|
||||
}
|
||||
|
||||
// NewErrParamMaxLen creates a new maximum length parameter error.
|
||||
func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
|
||||
return &ErrParamMaxLen{
|
||||
errInvalidParam: errInvalidParam{
|
||||
code: ParamMaxLenErrCode,
|
||||
field: field,
|
||||
msg: fmt.Sprintf("maximum size of %v, %v", max, value),
|
||||
},
|
||||
max: max,
|
||||
}
|
||||
}
|
||||
|
||||
// MaxLen returns the field's required minimum length.
|
||||
func (e *ErrParamMaxLen) MaxLen() int {
|
||||
return e.max
|
||||
}
|
||||
|
||||
// An ErrParamFormat represents a invalid format parameter error.
|
||||
type ErrParamFormat struct {
|
||||
errInvalidParam
|
||||
|
||||
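For context, the new ErrParamMaxLen behaves like the existing parameter errors: it is collected into request.ErrInvalidParams and surfaced as one aggregated validation error. A hedged sketch of constructing one directly, assuming this updated SDK version; the operation name, field name, and the 16-character limit are made up for illustration:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    func main() {
        // Collect parameter errors the same way generated validators do.
        errs := request.ErrInvalidParams{Context: "ExampleOperation"}

        host := "this-value-is-far-too-long.example.com"
        if len(host) > 16 { // 16 is an arbitrary limit for the example
            errs.Add(request.NewErrParamMaxLen("endpoint host", 16, host))
        }

        if errs.Len() > 0 {
            fmt.Println(errs.Error()) // prints the aggregated validation error
        }
    }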
4 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go generated vendored
@@ -99,7 +99,7 @@ handler logs every request and its payload made by a service client:

    sess.Handlers.Send.PushFront(func(r *request.Request) {
        // Log every request made and its payload
        logger.Println("Request: %s/%s, Payload: %s",
        logger.Printf("Request: %s/%s, Payload: %s",
            r.ClientInfo.ServiceName, r.Operation, r.Params)
    })

@@ -183,7 +183,7 @@ be returned when creating the session.
    // from assumed role.
    svc := s3.New(sess)

To setup assume role outside of a session see the stscrds.AssumeRoleProvider
To setup assume role outside of a session see the stscreds.AssumeRoleProvider
documentation.

Environment Variables
2 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go generated vendored
@@ -80,7 +80,7 @@ type envConfig struct {
    // AWS_CONFIG_FILE=$HOME/my_shared_config
    SharedConfigFile string

    // Sets the path to a custom Credentials Authroity (CA) Bundle PEM file
    // Sets the path to a custom Credentials Authority (CA) Bundle PEM file
    // that the SDK will use instead of the system's root CA bundle.
    // Only use this if you want to configure the SDK to use a custom set
    // of CAs.
5 vendor/github.com/aws/aws-sdk-go/aws/session/session.go generated vendored
@@ -14,6 +14,7 @@ import (
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/corehandlers"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
    "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    "github.com/aws/aws-sdk-go/aws/csm"
    "github.com/aws/aws-sdk-go/aws/defaults"
@@ -534,6 +535,10 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
        cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
            sharedCfg.Creds,
        )
    } else if len(sharedCfg.CredentialProcess) > 0 {
        cfg.Credentials = processcreds.NewCredentials(
            sharedCfg.CredentialProcess,
        )
    } else {
        // Fallback to default credentials provider, include mock errors
        // for the credential chain so user can identify why credentials
10 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go generated vendored
@@ -28,6 +28,8 @@ const (

    // endpoint discovery group
    enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
    // External Credential Process
    credentialProcessKey = `credential_process`

    // DefaultSharedConfigProfile is the default profile to be used when
    // loading configuration from the config files if another profile name
@@ -60,6 +62,9 @@ type sharedConfig struct {
    AssumeRole assumeRoleConfig
    AssumeRoleSource *sharedConfig

    // An external process to request credentials
    CredentialProcess string

    // Region is the region the SDK should use for looking up AWS service endpoints
    // and signing requests.
    //
@@ -223,6 +228,11 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e
        }
    }

    // `credential_process`
    if credProc := section.String(credentialProcessKey); len(credProc) > 0 {
        cfg.CredentialProcess = credProc
    }

    // Region
    if v := section.String(regionKey); len(v) > 0 {
        cfg.Region = v
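The session.go and shared_config.go hunks above wire the shared-config credential_process setting into the credential chain via the new processcreds provider. A hedged sketch of using that provider directly; the command path and region are illustrative placeholders, not values from this repository:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Roughly equivalent to a shared config profile containing:
        //   [profile example]
        //   credential_process = /usr/local/bin/fetch-aws-creds   (hypothetical command)
        creds := processcreds.NewCredentials("/usr/local/bin/fetch-aws-creds")

        sess, err := session.NewSession(&aws.Config{
            Region:      aws.String("us-east-1"),
            Credentials: creds,
        })
        if err != nil {
            fmt.Println("session error:", err)
            return
        }
        _ = sess // pass sess to service clients such as s3.New(sess)
    }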
63 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go generated vendored
@@ -98,25 +98,25 @@ var ignoredHeaders = rules{
var requiredSignedHeaders = rules{
    whitelist{
    mapRule{
        "Cache-Control": struct{}{},
        "Content-Disposition": struct{}{},
        "Content-Encoding": struct{}{},
        "Content-Language": struct{}{},
        "Content-Md5": struct{}{},
        "Content-Type": struct{}{},
        "Expires": struct{}{},
        "If-Match": struct{}{},
        "If-Modified-Since": struct{}{},
        "If-None-Match": struct{}{},
        "If-Unmodified-Since": struct{}{},
        "Range": struct{}{},
        "X-Amz-Acl": struct{}{},
        "X-Amz-Copy-Source": struct{}{},
        "X-Amz-Copy-Source-If-Match": struct{}{},
        "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
        "X-Amz-Copy-Source-If-None-Match": struct{}{},
        "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
        "X-Amz-Copy-Source-Range": struct{}{},
        "Cache-Control": struct{}{},
        "Content-Disposition": struct{}{},
        "Content-Encoding": struct{}{},
        "Content-Language": struct{}{},
        "Content-Md5": struct{}{},
        "Content-Type": struct{}{},
        "Expires": struct{}{},
        "If-Match": struct{}{},
        "If-Modified-Since": struct{}{},
        "If-None-Match": struct{}{},
        "If-Unmodified-Since": struct{}{},
        "Range": struct{}{},
        "X-Amz-Acl": struct{}{},
        "X-Amz-Copy-Source": struct{}{},
        "X-Amz-Copy-Source-If-Match": struct{}{},
        "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
        "X-Amz-Copy-Source-If-None-Match": struct{}{},
        "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
        "X-Amz-Copy-Source-Range": struct{}{},
        "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
        "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
        "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
@@ -134,7 +134,7 @@ var requiredSignedHeaders = rules{
        "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
        "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
        "X-Amz-Storage-Class": struct{}{},
        "X-Amz-Tagging": struct{}{},
        "X-Amz-Tagging": struct{}{},
        "X-Amz-Website-Redirect-Location": struct{}{},
        "X-Amz-Content-Sha256": struct{}{},
    },
@@ -182,7 +182,7 @@ type Signer struct {
    // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
    DisableURIPathEscaping bool

    // Disales the automatical setting of the HTTP request's Body field with the
    // Disables the automatical setting of the HTTP request's Body field with the
    // io.ReadSeeker passed in to the signer. This is useful if you're using a
    // custom wrapper around the body for the io.ReadSeeker and want to preserve
    // the Body value on the Request.Body.
@@ -422,7 +422,7 @@ var SignRequestHandler = request.NamedHandler{
// If the credentials of the request's config are set to
// credentials.AnonymousCredentials the request will not be signed.
func SignSDKRequest(req *request.Request) {
    signSDKRequestWithCurrTime(req, time.Now)
    SignSDKRequestWithCurrentTime(req, time.Now)
}

// BuildNamedHandler will build a generic handler for signing.
@@ -430,12 +430,15 @@ func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler
    return request.NamedHandler{
        Name: name,
        Fn: func(req *request.Request) {
            signSDKRequestWithCurrTime(req, time.Now, opts...)
            SignSDKRequestWithCurrentTime(req, time.Now, opts...)
        },
    }
}

func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
// function passed in. Behaves the same as SignSDKRequest with the exception
// the request is signed with the value returned by the current time function.
func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
    // If the request does not need to be signed ignore the signing of the
    // request if the AnonymousCredentials object is used.
    if req.Config.Credentials == credentials.AnonymousCredentials {
@@ -471,13 +474,9 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
        opt(v4)
    }

    signingTime := req.Time
    if !req.LastSignedAt.IsZero() {
        signingTime = req.LastSignedAt
    }

    curTime := curTimeFn()
    signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
        name, region, req.ExpireTime, req.ExpireTime > 0, signingTime,
        name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
    )
    if err != nil {
        req.Error = err
@@ -486,7 +485,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
    }

    req.SignedHeaderVals = signedHeaders
    req.LastSignedAt = curTimeFn()
    req.LastSignedAt = curTime
}

const logSignInfoMsg = `DEBUG: Request Signature:
@@ -755,7 +754,7 @@ func makeSha256Reader(reader io.ReadSeeker) []byte {
const doubleSpace = " "

// stripExcessSpaces will rewrite the passed in slice's string values to not
// contain muliple side-by-side spaces.
// contain multiple side-by-side spaces.
func stripExcessSpaces(vals []string) {
    var j, k, l, m, spaces int
    for i, str := range vals {
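The v4.go hunks above export SignSDKRequestWithCurrentTime and make the signer use the current time rather than the request's original timestamp. For reference, a minimal hedged sketch of signing a plain HTTP request with the exported Signer API; the credentials, bucket host, and region are placeholders:

    package main

    import (
        "fmt"
        "net/http"
        "strings"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
        v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
    )

    func main() {
        creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
        signer := v4.NewSigner(creds)

        body := strings.NewReader("hello")
        req, _ := http.NewRequest("PUT", "https://example-bucket.s3.us-east-1.amazonaws.com/key", body)

        // Sign with the current time, mirroring what SignSDKRequestWithCurrentTime
        // now does for SDK requests.
        _, err := signer.Sign(req, body, "s3", "us-east-1", time.Now())
        if err != nil {
            fmt.Println("sign error:", err)
            return
        }
        fmt.Println(req.Header.Get("Authorization") != "") // true once signed
    }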
2 vendor/github.com/aws/aws-sdk-go/aws/version.go generated vendored
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.15.81"
const SDKVersion = "1.16.31"
2 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go generated vendored
@@ -317,7 +317,7 @@ loop:
        return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container))
    }

    // returns a sublist which exludes the start symbol
    // returns a sublist which excludes the start symbol
    return stack.List(), nil
}
2 vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go generated vendored
@@ -15,7 +15,7 @@ func newExprStatement(ast AST) AST {
    return newAST(ASTKindExprStatement, ast)
}

// CommentStatement represents a comment in the ini defintion.
// CommentStatement represents a comment in the ini definition.
//
// grammar:
// comment -> #comment' | ;comment'
2 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go generated vendored
@@ -7,6 +7,6 @@ const (
)

// ECSContainerCredentialsURI is the endpoint to retrieve container
// credentials. This can be overriden to test to ensure the credential process
// credentials. This can be overridden to test to ensure the credential process
// is behaving correctly.
var ECSContainerCredentialsURI = "http://169.254.170.2"
51 vendor/github.com/aws/aws-sdk-go/private/protocol/host.go generated vendored
@@ -1,7 +1,54 @@
package protocol

// ValidHostLabel returns if the label is a valid RFC 1123 Section 2.1 domain
// host label name.
import (
    "strings"

    "github.com/aws/aws-sdk-go/aws/request"
)

// ValidateEndpointHostHandler is a request handler that will validate the
// request endpoint's hosts is a valid RFC 3986 host.
var ValidateEndpointHostHandler = request.NamedHandler{
    Name: "awssdk.protocol.ValidateEndpointHostHandler",
    Fn: func(r *request.Request) {
        err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host)
        if err != nil {
            r.Error = err
        }
    },
}

// ValidateEndpointHost validates that the host string passed in is a valid RFC
// 3986 host. Returns error if the host is not valid.
func ValidateEndpointHost(opName, host string) error {
    paramErrs := request.ErrInvalidParams{Context: opName}
    labels := strings.Split(host, ".")

    for i, label := range labels {
        if i == len(labels)-1 && len(label) == 0 {
            // Allow trailing dot for FQDN hosts.
            continue
        }

        if !ValidHostLabel(label) {
            paramErrs.Add(request.NewErrParamFormat(
                "endpoint host label", "[a-zA-Z0-9-]{1,63}", label))
        }
    }

    if len(host) > 255 {
        paramErrs.Add(request.NewErrParamMaxLen(
            "endpoint host", 255, host,
        ))
    }

    if paramErrs.Len() > 0 {
        return paramErrs
    }
    return nil
}

// ValidHostLabel returns if the label is a valid RFC 3986 host label.
func ValidHostLabel(label string) bool {
    if l := len(label); l == 0 || l > 63 {
        return false
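ValidateEndpointHost above checks each dot-separated label against the [a-zA-Z0-9-]{1,63} rule and caps the whole host at 255 characters. A small hedged sketch of the same checks, standalone; validLabel and validHost are illustrative helpers, not the SDK's exported functions:

    package main

    import (
        "fmt"
        "strings"
    )

    // validLabel mirrors the label rule used above (illustrative, not the
    // SDK's ValidHostLabel): 1-63 characters of letters, digits, or hyphens.
    func validLabel(label string) bool {
        if l := len(label); l == 0 || l > 63 {
            return false
        }
        for _, r := range label {
            switch {
            case r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z', r >= '0' && r <= '9', r == '-':
            default:
                return false
            }
        }
        return true
    }

    func validHost(host string) bool {
        if len(host) > 255 {
            return false
        }
        labels := strings.Split(host, ".")
        for i, label := range labels {
            if i == len(labels)-1 && label == "" {
                continue // trailing dot is allowed for FQDNs
            }
            if !validLabel(label) {
                return false
            }
        }
        return true
    }

    func main() {
        fmt.Println(validHost("s3.us-east-1.amazonaws.com")) // true
        fmt.Println(validHost("bad_host.example.com"))       // false: underscore
    }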
54 vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go generated vendored Normal file
@@ -0,0 +1,54 @@
package protocol

import (
    "strings"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/request"
)

// HostPrefixHandlerName is the handler name for the host prefix request
// handler.
const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler"

// NewHostPrefixHandler constructs a build handler
func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler {
    builder := HostPrefixBuilder{
        Prefix: prefix,
        LabelsFn: labelsFn,
    }

    return request.NamedHandler{
        Name: HostPrefixHandlerName,
        Fn: builder.Build,
    }
}

// HostPrefixBuilder provides the request handler to expand and prepend
// the host prefix into the operation's request endpoint host.
type HostPrefixBuilder struct {
    Prefix string
    LabelsFn func() map[string]string
}

// Build updates the passed in Request with the HostPrefix template expanded.
func (h HostPrefixBuilder) Build(r *request.Request) {
    if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
        return
    }

    var labels map[string]string
    if h.LabelsFn != nil {
        labels = h.LabelsFn()
    }

    prefix := h.Prefix
    for name, value := range labels {
        prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
    }

    r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
    if len(r.HTTPRequest.Host) > 0 {
        r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
    }
}
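host_prefix.go is a new file: HostPrefixBuilder expands a prefix template such as "{AccountId}." and prepends it to the endpoint host. A hedged standalone sketch of that expansion step; expandHostPrefix, the label map, and the host are illustrative, not SDK identifiers:

    package main

    import (
        "fmt"
        "strings"
    )

    // expandHostPrefix substitutes {name} placeholders in the prefix template,
    // the same replacement HostPrefixBuilder.Build performs before prepending
    // the result to the request's endpoint host.
    func expandHostPrefix(prefix string, labels map[string]string) string {
        for name, value := range labels {
            prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
        }
        return prefix
    }

    func main() {
        host := "service.us-east-1.amazonaws.com"
        prefix := expandHostPrefix("{AccountId}.", map[string]string{"AccountId": "123456789012"})
        fmt.Println(prefix + host) // 123456789012.service.us-east-1.amazonaws.com
    }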
1988 vendor/github.com/aws/aws-sdk-go/service/s3/api.go generated vendored (file diff suppressed because it is too large)
1 vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go generated vendored
@@ -33,6 +33,7 @@ func defaultInitRequestFn(r *request.Request) {
    switch r.Operation.Name {
    case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
        opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
        opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration,
        opPutBucketReplication:
        // These S3 operations require Content-MD5 to be set
        r.Handlers.Build.PushBack(contentMD5)
24 vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go generated vendored
@@ -228,6 +228,18 @@ type S3API interface {
    GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error)
    GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput)

    GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error)
    GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error)
    GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput)

    GetObjectLockConfiguration(*s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error)
    GetObjectLockConfigurationWithContext(aws.Context, *s3.GetObjectLockConfigurationInput, ...request.Option) (*s3.GetObjectLockConfigurationOutput, error)
    GetObjectLockConfigurationRequest(*s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput)

    GetObjectRetention(*s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error)
    GetObjectRetentionWithContext(aws.Context, *s3.GetObjectRetentionInput, ...request.Option) (*s3.GetObjectRetentionOutput, error)
    GetObjectRetentionRequest(*s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput)

    GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error)
    GetObjectTaggingWithContext(aws.Context, *s3.GetObjectTaggingInput, ...request.Option) (*s3.GetObjectTaggingOutput, error)
    GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput)
@@ -379,6 +391,18 @@ type S3API interface {
    PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error)
    PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput)

    PutObjectLegalHold(*s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error)
    PutObjectLegalHoldWithContext(aws.Context, *s3.PutObjectLegalHoldInput, ...request.Option) (*s3.PutObjectLegalHoldOutput, error)
    PutObjectLegalHoldRequest(*s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput)

    PutObjectLockConfiguration(*s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error)
    PutObjectLockConfigurationWithContext(aws.Context, *s3.PutObjectLockConfigurationInput, ...request.Option) (*s3.PutObjectLockConfigurationOutput, error)
    PutObjectLockConfigurationRequest(*s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput)

    PutObjectRetention(*s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error)
    PutObjectRetentionWithContext(aws.Context, *s3.PutObjectRetentionInput, ...request.Option) (*s3.PutObjectRetentionOutput, error)
    PutObjectRetentionRequest(*s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput)

    PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error)
    PutObjectTaggingWithContext(aws.Context, *s3.PutObjectTaggingInput, ...request.Option) (*s3.PutObjectTaggingOutput, error)
    PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput)
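The interface additions above correspond to the new S3 Object Lock operations in this SDK version. A hedged example of calling one of them through the s3 client; the bucket name and region are placeholders, and the request will fail without valid credentials and a real bucket:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := s3.New(sess)

        // One of the newly added operations: read a bucket's Object Lock settings.
        out, err := svc.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
            Bucket: aws.String("example-bucket"),
        })
        if err != nil {
            fmt.Println("request failed:", err)
            return
        }
        fmt.Println(out)
    }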
95 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go generated vendored
@@ -6,7 +6,6 @@ import (
    "io"
    "sort"
    "sync"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
@@ -96,100 +95,6 @@ func (m multiUploadError) UploadID() string {
    return m.uploadID
}

// UploadInput contains all input for upload requests to Amazon S3.
type UploadInput struct {
    // The canned ACL to apply to the object.
    ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`

    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

    // Specifies caching behavior along the request/reply chain.
    CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`

    // Specifies presentational information for the object.
    ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`

    // Specifies what content encodings have been applied to the object and thus
    // what decoding mechanisms must be applied to obtain the media-type referenced
    // by the Content-Type header field.
    ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`

    // The language the content is in.
    ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`

    // The base64-encoded 128-bit MD5 digest of the part data.
    ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`

    // A standard MIME type describing the format of the object data.
    ContentType *string `location:"header" locationName:"Content-Type" type:"string"`

    // The date and time at which the object is no longer cacheable.
    Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`

    // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
    GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`

    // Allows grantee to read the object data and its metadata.
    GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`

    // Allows grantee to read the object ACL.
    GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`

    // Allows grantee to write the ACL for the applicable object.
    GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`

    Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`

    // A map of metadata to store with the object in S3.
    Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`

    // Confirms that the requester knows that she or he will be charged for the
    // request. Bucket owners need not specify this parameter in their requests.
    // Documentation on downloading objects from requester pays buckets can be found
    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`

    // Specifies the algorithm to use to when encrypting the object (e.g., AES256,
    // aws:kms).
    SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

    // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
    // data. This value is used to store the object and then it is discarded; Amazon
    // does not store the encryption key. The key must be appropriate for use with
    // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
    // header.
    SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`

    // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
    // Amazon S3 uses this header for a message integrity check to ensure the encryption
    // key was transmitted without error.
    SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

    // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
    // requests for an object protected by AWS KMS will fail if not made via SSL
    // or using SigV4. Documentation on configuring any of the officially supported
    // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
    SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`

    // The Server-side encryption algorithm used when storing this object in S3
    // (e.g., AES256, aws:kms).
    ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`

    // The type of storage to use for the object. Defaults to 'STANDARD'.
    StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`

    // The tag-set for the object. The tag-set must be encoded as URL Query parameters
    Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`

    // If the bucket is configured as a website, redirects requests for this object
    // to another object in the same bucket or to an external URL. Amazon S3 stores
    // the value of this header in the object metadata.
    WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`

    // The readable body payload to send to S3.
    Body io.Reader
}

// UploadOutput represents a response from the Upload() call.
type UploadOutput struct {
    // The URL where the object was uploaded to.
122 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go generated vendored Normal file
@@ -0,0 +1,122 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package s3manager

import (
    "io"
    "time"
)

// UploadInput provides the input parameters for uploading a stream or buffer
// to an object in an Amazon S3 bucket. This type is similar to the s3
// package's PutObjectInput with the exception that the Body member is an
// io.Reader instead of an io.ReadSeeker.
type UploadInput struct {
    _ struct{} `type:"structure" payload:"Body"`

    // The canned ACL to apply to the object.
    ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`

    // The readable body payload to send to S3.
    Body io.Reader

    // Name of the bucket to which the PUT operation was initiated.
    //
    // Bucket is a required field
    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

    // Specifies caching behavior along the request/reply chain.
    CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`

    // Specifies presentational information for the object.
    ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`

    // Specifies what content encodings have been applied to the object and thus
    // what decoding mechanisms must be applied to obtain the media-type referenced
    // by the Content-Type header field.
    ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`

    // The language the content is in.
    ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`

    // The base64-encoded 128-bit MD5 digest of the part data.
    ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`

    // A standard MIME type describing the format of the object data.
    ContentType *string `location:"header" locationName:"Content-Type" type:"string"`

    // The date and time at which the object is no longer cacheable.
    Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`

    // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
    GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`

    // Allows grantee to read the object data and its metadata.
    GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`

    // Allows grantee to read the object ACL.
    GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`

    // Allows grantee to write the ACL for the applicable object.
    GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`

    // Object key for which the PUT operation was initiated.
    //
    // Key is a required field
    Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

    // A map of metadata to store with the object in S3.
    Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`

    // The Legal Hold status that you want to apply to the specified object.
    ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`

    // The Object Lock mode that you want to apply to this object.
    ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`

    // The date and time when you want this object's Object Lock to expire.
    ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`

    // Confirms that the requester knows that she or he will be charged for the
    // request. Bucket owners need not specify this parameter in their requests.
    // Documentation on downloading objects from requester pays buckets can be found
    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

    // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
    SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

    // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
    // data. This value is used to store the object and then it is discarded; Amazon
    // does not store the encryption key. The key must be appropriate for use with
    // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
    // header.
    SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`

    // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
    // Amazon S3 uses this header for a message integrity check to ensure the encryption
    // key was transmitted without error.
    SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

    // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
    // requests for an object protected by AWS KMS will fail if not made via SSL
    // or using SigV4. Documentation on configuring any of the officially supported
    // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
    SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`

    // The Server-side encryption algorithm used when storing this object in S3
    // (e.g., AES256, aws:kms).
    ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`

    // The type of storage to use for the object. Defaults to 'STANDARD'.
    StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`

    // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
    // (For example, "Key1=Value1")
    Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`

    // If the bucket is configured as a website, redirects requests for this object
    // to another object in the same bucket or to an external URL. Amazon S3 stores
    // the value of this header in the object metadata.
    WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}
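UploadInput now lives in its own generated file, upload_input.go, and keeps Body as an io.Reader so the uploader can stream data without seeking. A hedged usage sketch of the s3manager uploader; the bucket, key, and region are placeholders and the call needs real credentials to succeed:

    package main

    import (
        "fmt"
        "strings"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        uploader := s3manager.NewUploader(sess)

        // Body only needs to be an io.Reader, so any stream works here.
        out, err := uploader.Upload(&s3manager.UploadInput{
            Bucket: aws.String("example-bucket"),
            Key:    aws.String("example/key.txt"),
            Body:   strings.NewReader("hello from an illustrative upload"),
        })
        if err != nil {
            fmt.Println("upload failed:", err)
            return
        }
        fmt.Println("uploaded to", out.Location)
    }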
2 vendor/github.com/coreos/bbolt/.gitignore generated vendored
@@ -2,4 +2,4 @@
*.test
*.swp
/bin/
cmd/bolt/bolt
cover.out
Some files were not shown because too many files have changed in this diff.