mirror of https://github.com/rclone/rclone.git synced 2026-01-12 13:34:01 +00:00

Compare commits


44 Commits

Author SHA1 Message Date
Nick Craig-Wood
6811629877 mega: fix unmarshal error on download - FIXME need to upstream this fix
Failed to copy: failed to open source object: open download file
    failed: json: cannot unmarshal number -18 into Go struct field
    DownloadResp.e of type uint32

See: https://forum.rclone.org/t/failed-message-what-does-it-mean/9065
2019-03-10 17:11:59 +00:00
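A minimal sketch of the kind of fix needed, assuming a signed wrapper type (the type name and method are illustrative, not the mega SDK's actual API):

    package mega

    import "encoding/json"

    // errorCode tolerates negative API error numbers such as -18,
    // which fail to unmarshal into a plain uint32 field.
    type errorCode int32

    func (e *errorCode) UnmarshalJSON(b []byte) error {
        var n int64
        if err := json.Unmarshal(b, &n); err != nil {
            return err
        }
        *e = errorCode(n) // keep the sign rather than erroring
        return nil
    }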
Nick Craig-Wood
415eeca6cf drive: fix range requests on 0 length files
Before this change a range request on a 0 length file would fail

    $ rclone cat --head 128 drive:test/emptyfile
    ERROR : open file failed: googleapi: Error 416: Request range not satisfiable, requestedRangeNotSatisfiable

To fix this we remove Range: headers on requests for zero length files.
2019-03-10 15:47:34 +00:00
Nick Craig-Wood
58d9a3e1b5 filter: reload filter when the options are set via the rc - fixes #3018 2019-03-10 13:09:44 +00:00
Nick Craig-Wood
cccadfa7ae rc: add ability for options blocks to register reload functions 2019-03-10 13:09:44 +00:00
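Together these two commits mean filter options set over the rc take effect immediately, for example (a sketch; the exact JSON keys depend on the filter options block):

    $ rclone rc options/set --json '{"filter": {"IncludeRule": ["*.jpg"]}}'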
ishuah
1b52f8d2a5 copy/sync/move: add --create-empty-src-dirs flag - fixes #2869 2019-03-10 11:56:38 +00:00
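For example, to mirror a tree including its empty directories (sketch):

    $ rclone sync --create-empty-src-dirs /path/to/src remote:dst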
Nick Craig-Wood
2078ad68a5 gcs: Allow bucket policy only buckets - fixes #3014
This introduces a new config variable bucket_policy_only.  If this is
set then rclone:

- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set
2019-03-10 11:45:42 +00:00
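A config sketch for such a bucket (the remote name is illustrative):

    [gcs]
    type = google cloud storage
    bucket_policy_only = true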
Nick Craig-Wood
368ed9e67d docs: add a FAQ entry about --max-backlog 2019-03-09 16:19:24 +00:00
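--max-backlog bounds the in-memory queue of objects waiting to be transferred; raising it trades memory for more accurate progress and ETAs on big syncs, e.g. (value illustrative):

    $ rclone sync --max-backlog 200000 src: dst: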
Nick Craig-Wood
7c30993bb7 Add Fionera to contributors 2019-03-09 16:19:24 +00:00
Fionera
55b9a4ed30 Add ServerSideAcrossConfig Flag and check for it. fixes #2728 2019-03-09 16:18:45 +00:00
jaKa
118a8b949e koofr: implemented a backend for Koofr cloud storage service.
Implemented a Koofr REST API backend.
Added said backend to tests.
Added documentation for said backend.
2019-03-06 13:41:43 +00:00
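A minimal config sketch using the options this backend registers (values illustrative; the password is a generated app password stored obscured, not the account password):

    [koofr]
    type = koofr
    user = you@example.com
    password = <obscured app password>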
jaKa
1d14e30383 vendor: add github.com/koofr/go-koofrclient
* added koofr client SDK dep for koofr backend
2019-03-06 13:41:43 +00:00
Nick Craig-Wood
27714e29c3 s3: note incompatibility with CEPH Jewel - fixes #3015 2019-03-06 11:50:37 +00:00
Nick Craig-Wood
9f8e1a1dc5 drive: fix imports of text files
Before this change text file imports were ignored.  This was because
the mime type wasn't matched.

Fix this by adjusting the keys in the mime type maps as well as the
values.

See: https://forum.rclone.org/t/how-to-upload-text-files-to-google-drive-as-google-docs/9014
2019-03-05 17:20:31 +00:00
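With the maps fixed, an import such as this converts a text file to a Google Doc on upload (sketch using the existing --drive-import-formats flag):

    $ rclone copy --drive-import-formats txt notes.txt drive:docs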
Nick Craig-Wood
1692c6bd0a vfs: shorten the locking window for vfs/refresh
Before this change we locked the root directory, recursively fetched
the listing, applied it then unlocked the root directory.

After this change we recursively fetch the listing then apply it with
the root directory locked which shortens the time that the root
directory is locked greatly.

With both the original method and the new one the subdirectories are
left unlocked and so could potentially be changed, leading to
inconsistencies.  This change makes the potential for inconsistencies
slightly worse by also leaving the root directory unlocked, in
exchange for a much more responsive system while running vfs/refresh.

See: https://forum.rclone.org/t/rclone-rc-vfs-refresh-locking-directory-being-refreshed/9004
2019-03-05 14:17:42 +00:00
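The call in question looks like this (a sketch of typical usage):

    $ rclone rc vfs/refresh recursive=true dir=path/to/dir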
Nick Craig-Wood
d233efbf63 Add marcintustin to contributors 2019-03-01 17:10:26 +00:00
marcintustin
e9a45a5a34 googlecloudstorage: fall back to default application credentials
Fall back to default application credentials when all other credential sources fail

This change allows users with default application credentials
configured (notably when running on google compute instances) to
dispense with explicitly configuring google cloud storage credentials
in rclone's own configuration.
2019-03-01 18:05:31 +01:00
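In practice this means a service account key pointed to by the standard environment variable, or instance credentials on GCE, are picked up automatically (remote name illustrative):

    $ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/key.json
    $ rclone lsd gcs: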
Nick Craig-Wood
f6eb5c6983 lib/pacer: fix test on macOS 2019-03-01 12:27:33 +00:00
Nick Craig-Wood
2bf19787d5 Add Dr.Rx to contributors 2019-03-01 12:25:16 +00:00
Dr.Rx
0ea3a57ecb azureblob: Enable MD5 checksums when uploading files bigger than the "Cutoff"
This enables MD5 checksum calculation and publication when uploading files above the "Cutoff" limit.
It was previously explicitly ignored for multi-block (a.k.a. multipart) uploads to Azure Blob Storage.
2019-03-01 11:12:23 +01:00
Nick Craig-Wood
b353c730d8 vfs: make tests work on remotes which don't support About 2019-02-28 14:05:21 +00:00
Nick Craig-Wood
173dfbd051 vfs: read directory and check for a file before mkdir
Before this change when doing Mkdir the VFS layer could add the new
item to an unread directory which caused confusion.

It could also do mkdir on a file when run on a bucket based remote
which would temporarily overwrite the file with a directory.

Fixes #2993
2019-02-28 14:05:17 +00:00
Nick Craig-Wood
e3bceb9083 operations: fix Overlapping test for Windows native paths 2019-02-28 11:39:32 +00:00
Nick Craig-Wood
52c6b373cc Add calisro to contributors 2019-02-28 10:20:35 +00:00
calisro
0bc0f62277 Recommendation for creating own client ID 2019-02-28 11:20:08 +01:00
Cnly
12c8ee4b4b atexit: allow functions to be unregistered 2019-02-27 23:37:24 +01:00
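A sketch of how a call site might use this (the handle-based Register/Unregister names are assumed here, not confirmed):

    handle := atexit.Register(cleanup)
    defer atexit.Unregister(handle)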
Nick Craig-Wood
5240f9d1e5 sync: fix integration tests to check correct error 2019-02-27 22:05:16 +00:00
Nick Craig-Wood
997654d77d ncdu: fix display corruption with Chinese characters - #2989 2019-02-27 09:55:28 +00:00
Nick Craig-Wood
f1809451f6 docs: add more examples of config-less usage 2019-02-27 09:41:40 +00:00
Nick Craig-Wood
84c650818e sync: don't allow syncs on overlapping remotes - fixes #2932 2019-02-26 19:25:52 +00:00
Nick Craig-Wood
c5775cf73d fserrors: don't panic on uncomparable errors 2019-02-26 15:39:16 +00:00
Nick Craig-Wood
dca482e058 Add Alexandru Bumbacea to contributors 2019-02-26 15:39:16 +00:00
Nick Craig-Wood
6943169cef Add Six to contributors 2019-02-26 15:38:25 +00:00
Alexandru Bumbacea
4fddec113c sftp: allow custom ssh client config 2019-02-26 16:37:54 +01:00
Six
2114fd8f26 cmd: Fix tab-completion for remotes with underscores in their names 2019-02-26 16:25:45 +01:00
Nick Craig-Wood
63bb6de491 build: update to use go1.12 for the build 2019-02-26 13:18:31 +00:00
Nick Craig-Wood
0a56a168ff bin/get-github-release.go: scrape the downloads page to avoid the API limit
This should fix pull requests build failures which can't use the
github token.
2019-02-25 21:34:59 +00:00
Nick Craig-Wood
88e22087a8 Add Nestar47 to contributors 2019-02-25 21:34:59 +00:00
Nestar47
9404ed703a drive: add docs on team drives and --fast-list eventual consistency 2019-02-25 21:46:27 +01:00
Nick Craig-Wood
c7ecccd5ca mount: remove an obsolete EXPERIMENTAL tag from the docs 2019-02-25 17:53:53 +00:00
Sebastian Bünger
972e27a861 jottacloud: fix token refresh - fixes #2992 2019-02-21 19:26:18 +01:00
Fabian Möller
8f4ea77c07 fs: remove unnecessary pacer warning 2019-02-18 08:42:36 +01:00
Fabian Möller
61616ba864 pacer: make pacer more flexible
Make the pacer package more flexible by extracting the pace calculation
functions into a separate interface. This also allows features that
require the fs package, such as logging and custom errors, to be moved
into the fs package.

Also add a RetryAfterError sentinel error that can be used to signal a
desired retry time to the Calculator.
2019-02-16 14:38:07 +00:00
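A sketch of the resulting API, based on the call sites changed in this diff (doAPICall and isRateLimited are illustrative placeholders):

    p := fs.NewPacer(pacer.NewDefault(pacer.MinSleep(10*time.Millisecond), pacer.MaxSleep(2*time.Second)))
    err := p.Call(func() (bool, error) {
        resp, err := doAPICall()
        if isRateLimited(resp) {
            // tell the Calculator how long to wait before retrying
            return true, pacer.RetryAfterError(err, 5*time.Second)
        }
        return false, err
    })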
Fabian Möller
9ed721a3f6 errors: add lib/errors package 2019-02-16 14:38:07 +00:00
Nick Craig-Wood
0b9d7fec0c lsf: add 'e' format to show encrypted names and 'o' for original IDs
This brings it up to par with lsjson.

This commit also reworks the framework to use ListJSON internally
which removes duplicated code and makes testing easier.
2019-02-14 14:45:35 +00:00
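For example, on a crypt remote (sketch; 'p' is the plain path column):

    $ rclone lsf --format "peo" secret: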
151 changed files with 4260 additions and 7071 deletions

View File

@@ -8,7 +8,7 @@ go:
 - 1.9.x
 - 1.10.x
 - 1.11.x
-- 1.12rc1
+- 1.12.x
 - tip
go_import_path: github.com/ncw/rclone
before_install:
@@ -44,7 +44,7 @@ matrix:
     - go: tip
   include:
     - os: osx
-      go: 1.12rc1
+      go: 1.12.x
       env: GOTAGS=""
cache:
directories:
@@ -56,5 +56,5 @@ deploy:
   on:
     repo: ncw/rclone
     all_branches: true
-    go: 1.12rc1
+    go: 1.12.x
     condition: $TRAVIS_PULL_REQUEST == false

View File

@@ -17,8 +17,8 @@ ifneq ($(TAG),$(LAST_TAG))
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
-# Run full tests if go >= go1.11
-FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
+# Run full tests if go >= go1.12
+FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 12)')
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org

View File

@@ -36,6 +36,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
+* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)

View File

@@ -1,821 +0,0 @@
package adb
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
adb "github.com/thinkhy/go-adb"
"github.com/thinkhy/go-adb/wire"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "adb",
Description: "Android Debug Bridge",
NewFs: NewFs,
Options: []fs.Option{{
Name: "serial",
Help: "The device serial to use. Leave empty for auto selection.",
Advanced: true,
}, {
Name: "host",
Default: "localhost",
Help: "The ADB server host.",
Advanced: true,
}, {
Name: "port",
Default: 5037,
Help: "The ADB server port.",
Advanced: true,
}, {
Name: "executable",
Help: "The ADB executable path.",
Advanced: true,
}, {
Name: "copy_links",
Help: "Follow symlinks and copy the pointed to item.",
Default: false,
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Serial string
Host string
Port uint16
Executable string
FollowSymlinks bool `config:"copy_links"`
}
// Fs represents an adb device
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
client *adb.Adb
device *execDevice
statFunc statFunc
statFuncMu sync.Mutex
touchFunc touchFunc
touchFuncMu sync.Mutex
}
// Object describes an adb file
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
size int64
mode os.FileMode
modTime time.Time
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("ADB root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if root == "" {
root = "/"
}
f := &Fs{
name: name,
root: root,
opt: *opt,
statFunc: (*Object).statTry,
touchFunc: (*Object).touchTry,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(f)
f.client, err = adb.NewWithConfig(adb.ServerConfig{
Host: opt.Host,
Port: int(opt.Port),
PathToAdb: opt.Executable,
})
if err != nil {
return nil, errors.Wrapf(err, "Could not configure ADB server")
}
err = f.client.StartServer()
if err != nil {
return nil, errors.Wrapf(err, "Could not start ADB server")
}
serverVersion, err := f.client.ServerVersion()
if err != nil {
return nil, errors.Wrapf(err, "Could not get ADB server version")
}
fs.Debugf(f, "ADB server version: 0x%X", serverVersion)
serials, err := f.client.ListDeviceSerials()
if err != nil {
return nil, errors.Wrapf(err, "Could not get ADB devices")
}
descriptor := adb.AnyDevice()
if opt.Serial != "" {
descriptor = adb.DeviceWithSerial(opt.Serial)
}
if len(serials) > 1 && opt.Serial == "" {
return nil, errors.New("Multiple ADB devices found. Use the serial config to select a specific device")
}
f.device = &execDevice{f.client.Device(descriptor)}
// follow symlinks for root paths
entry, err := f.newEntryFollowSymlinks("")
switch err {
case nil:
case fs.ErrorObjectNotFound:
default:
return nil, err
}
switch entry.(type) {
case fs.Object:
f.root = path.Dir(f.root)
return f, fs.ErrorIsFile
case nil:
return f, nil
case fs.Directory:
return f, nil
default:
return nil, errors.Errorf("Invalid root entry type %t", entry)
}
}
// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
return 1 * time.Second
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
p := path.Join(f.root, dir)
dirEntries, err := f.device.ListDirEntries(p)
if err != nil {
return nil, errors.Wrap(err, "ListDirEntries")
}
defer fs.CheckClose(dirEntries, &err)
found := false
for dirEntries.Next() {
found = true
dirEntry := dirEntries.Entry()
switch dirEntry.Name {
case ".", "..":
continue
}
fsEntry, err := f.entryForDirEntry(path.Join(dir, dirEntry.Name), dirEntry, f.opt.FollowSymlinks)
if err != nil {
fs.Errorf(p, "Listing error: %q: %v", dirEntry.Name, err)
return nil, err
} else if fsEntry != nil {
entries = append(entries, fsEntry)
} else {
fs.Debugf(f, "Skipping DirEntry %#v", dirEntry)
}
}
err = dirEntries.Err()
if err != nil {
return nil, errors.Wrap(err, "ListDirEntries")
}
if !found {
return nil, fs.ErrorDirNotFound
}
return
}
func (f *Fs) entryForDirEntry(remote string, e *adb.DirEntry, followSymlinks bool) (fs.DirEntry, error) {
o := f.newObjectWithInfo(remote, e)
// Follow symlinks if required
if followSymlinks && (e.Mode&os.ModeSymlink) != 0 {
err := f.statFunc(&o)
if err != nil {
return nil, err
}
}
if o.mode.IsDir() {
return fs.NewDir(remote, o.modTime), nil
}
return &o, nil
}
func (f *Fs) newEntry(remote string) (fs.DirEntry, error) {
return f.newEntryWithFollow(remote, f.opt.FollowSymlinks)
}
func (f *Fs) newEntryFollowSymlinks(remote string) (fs.DirEntry, error) {
return f.newEntryWithFollow(remote, true)
}
func (f *Fs) newEntryWithFollow(remote string, followSymlinks bool) (fs.DirEntry, error) {
entry, err := f.device.Stat(path.Join(f.root, remote))
if err != nil {
if adb.HasErrCode(err, adb.FileNoExistError) {
return nil, fs.ErrorObjectNotFound
}
return nil, errors.Wrapf(err, "Stat failed")
}
return f.entryForDirEntry(remote, entry, followSymlinks)
}
func (f *Fs) newObjectWithInfo(remote string, e *adb.DirEntry) Object {
return Object{
fs: f,
remote: remote,
size: int64(e.Size),
mode: e.Mode,
modTime: e.ModifiedAt,
}
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
entry, err := f.newEntry(remote)
if err != nil {
return nil, err
}
obj, ok := entry.(fs.Object)
if !ok {
return nil, fs.ErrorObjectNotFound
}
return obj, nil
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
// Temporary Object under construction - info filled in by Update()
o := f.newObject(remote)
err := o.Update(in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// newObject makes a half completed Object
func (f *Fs) newObject(remote string) *Object {
return &Object{
fs: f,
remote: remote,
}
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(dir string) error {
p := path.Join(f.root, dir)
output, code, err := f.device.execCommandWithExitCode("mkdir -p", p)
switch err := err.(type) {
case nil:
return nil
case adb.ShellExitError:
entry, _ := f.newEntry(p)
if _, ok := entry.(fs.Directory); ok {
return nil
}
return errors.Errorf("mkdir %q failed with %d: %q", dir, code, output)
default:
return errors.Wrap(err, "mkdir")
}
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
p := path.Join(f.root, dir)
output, code, err := f.device.execCommandWithExitCode("rmdir", p)
switch err := err.(type) {
case nil:
return nil
case adb.ShellExitError:
return errors.Errorf("rmdir %q failed with %d: %q", dir, code, output)
default:
return errors.Wrap(err, "rmdir")
}
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime() time.Time {
return o.modTime
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(t time.Time) error {
return o.fs.touchFunc(o, t)
}
func (o *Object) stat() error {
return o.statStatArg(statArgC, path.Join(o.fs.root, o.remote))
}
func (o *Object) setMetadata(entry *adb.DirEntry) {
// Don't overwrite the values if we don't need to
// this avoids upsetting the race detector
if o.size != int64(entry.Size) {
o.size = int64(entry.Size)
}
if !o.modTime.Equal(entry.ModifiedAt) {
o.modTime = entry.ModifiedAt
}
if o.mode != entry.Mode {
o.mode = decodeEntryMode(uint32(entry.Mode))
}
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
const blockSize = 1 << 12
var offset, count int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
case *fs.SeekOption:
offset = x.Offset
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if offset > o.size {
offset = o.size
}
if count < 0 {
count = o.size - offset
} else if count+offset > o.size {
count = o.size - offset
}
fs.Debugf(o, "Open: remote: %q offset: %d count: %d", o.remote, offset, count)
if count == 0 {
return ioutil.NopCloser(bytes.NewReader(nil)), nil
}
offsetBlocks, offsetRest := offset/blockSize, offset%blockSize
countBlocks := (count-1)/blockSize + 1
conn, err := o.fs.device.execCommand(fmt.Sprintf("sh -c 'dd \"if=$0\" bs=%d skip=%d count=%d 2>/dev/null'", blockSize, offsetBlocks, countBlocks), path.Join(o.fs.root, o.remote))
if err != nil {
return nil, err
}
return &adbReader{
ReadCloser: readers.NewLimitedReadCloser(conn, count+offsetRest),
skip: offsetRest,
expected: count,
}, nil
}
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
for _, option := range options {
if option.Mandatory() {
fs.Logf(option, "Unsupported mandatory option: %v", option)
}
}
written, err := o.writeToFile(path.Join(o.fs.root, o.remote), in, 0666, src.ModTime())
if err != nil {
if removeErr := o.Remove(); removeErr != nil {
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
}
return err
}
expected := src.Size()
if expected == -1 {
expected = written
}
for _, t := range []int64{100, 250, 500, 1000, 2500, 5000, 10000} {
err = o.stat()
if err != nil {
return err
}
if o.size == expected {
return nil
}
fs.Debugf(o, "Invalid size after update, expected: %d got: %d", expected, o.size)
time.Sleep(time.Duration(t) * time.Millisecond)
}
return o.stat()
}
// Remove this object
func (o *Object) Remove() error {
p := path.Join(o.fs.root, o.remote)
output, code, err := o.fs.device.execCommandWithExitCode("rm", p)
switch err := err.(type) {
case nil:
return nil
case adb.ShellExitError:
return errors.Errorf("rm %q failed with %d: %q", o.remote, code, output)
default:
return errors.Wrap(err, "rm")
}
}
func (o *Object) writeToFile(path string, rd io.Reader, perms os.FileMode, modeTime time.Time) (written int64, err error) {
dst, err := o.fs.device.OpenWrite(path, perms, modeTime)
if err != nil {
return
}
defer fs.CheckClose(dst, &err)
return io.Copy(dst, rd)
}
type statFunc func(*Object) error
func (o *Object) statTry() error {
o.fs.statFuncMu.Lock()
defer o.fs.statFuncMu.Unlock()
for _, f := range []statFunc{
(*Object).statStatL, (*Object).statRealPath, (*Object).statReadLink,
} {
err := f(o)
if err != nil {
fs.Debugf(o, "%s", err)
} else {
o.fs.statFunc = f
return nil
}
}
return errors.Errorf("unable to resolve link target")
}
const (
statArgLc = "-Lc"
statArgC = "-c"
)
func (o *Object) statStatL() error {
return o.statStatArg(statArgLc, path.Join(o.fs.root, o.remote))
}
func (o *Object) statStatArg(arg, path string) error {
output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("stat %s %s", arg, "%f,%s,%Y"), path)
output = strings.TrimSpace(output)
switch err := err.(type) {
case nil:
case adb.ShellExitError:
return errors.Errorf("stat %q failed with %d: %q", o.remote, code, output)
default:
return errors.Wrap(err, "stat")
}
parts := strings.Split(output, ",")
if len(parts) != 3 {
return errors.Errorf("stat %q invalid output %q", o.remote, output)
}
mode, err := strconv.ParseUint(parts[0], 16, 32)
if err != nil {
return errors.Errorf("stat %q invalid output %q", o.remote, output)
}
size, err := strconv.ParseUint(parts[1], 10, 64)
if err != nil {
return errors.Errorf("stat %q invalid output %q", o.remote, output)
}
modTime, err := strconv.ParseInt(parts[2], 10, 64)
if err != nil {
return errors.Errorf("stat %q invalid output %q", o.remote, output)
}
o.size = int64(size)
o.modTime = time.Unix(modTime, 0)
o.mode = decodeEntryMode(uint32(mode))
return nil
}
func (o *Object) statReadLink() error {
p := path.Join(o.fs.root, o.remote)
output, code, err := o.fs.device.execCommandWithExitCode("readlink -f", p)
output = strings.TrimSuffix(output, "\n")
switch err := err.(type) {
case nil:
case adb.ShellExitError:
return errors.Errorf("readlink %q failed with %d: %q", o.remote, code, output)
default:
return errors.Wrap(err, "readlink")
}
return o.statStatArg(statArgC, output)
}
func (o *Object) statRealPath() error {
p := path.Join(o.fs.root, o.remote)
output, code, err := o.fs.device.execCommandWithExitCode("realpath", p)
output = strings.TrimSuffix(output, "\n")
switch err := err.(type) {
case nil:
case adb.ShellExitError:
return errors.Errorf("realpath %q failed with %d: %q", o.remote, code, output)
default:
return errors.Wrap(err, "realpath")
}
return o.statStatArg(statArgC, output)
}
type touchFunc func(*Object, time.Time) error
func (o *Object) touchTry(t time.Time) error {
o.fs.touchFuncMu.Lock()
defer o.fs.touchFuncMu.Unlock()
for _, f := range []touchFunc{
(*Object).touchCmd, (*Object).touchCd,
} {
err := f(o, t)
if err != nil {
fs.Debugf(o, "%s", err)
} else {
o.fs.touchFunc = f
return nil
}
}
return errors.Errorf("unable to resolve link target")
}
const (
touchArgCmd = "-cmd"
touchArgCd = "-cd"
)
func (o *Object) touchCmd(t time.Time) error {
return o.touchStatArg(touchArgCmd, path.Join(o.fs.root, o.remote), t)
}
func (o *Object) touchCd(t time.Time) error {
return o.touchStatArg(touchArgCd, path.Join(o.fs.root, o.remote), t)
}
func (o *Object) touchStatArg(arg, path string, t time.Time) error {
output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("touch %s %s", arg, t.Format(time.RFC3339Nano)), path)
output = strings.TrimSpace(output)
switch err := err.(type) {
case nil:
case adb.ShellExitError:
return errors.Errorf("touch %q failed with %d: %q", o.remote, code, output)
default:
return errors.Wrap(err, "touch")
}
err = o.stat()
if err != nil {
return err
}
if diff, ok := checkTimeEqualWithPrecision(t, o.modTime, o.fs.Precision()); !ok {
return errors.Errorf("touch %q to %s was ineffective: %d", o.remote, t, diff)
}
return nil
}
func checkTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
dt := t0.Sub(t1)
if dt >= precision || dt <= -precision {
return dt, false
}
return dt, true
}
func decodeEntryMode(entryMode uint32) os.FileMode {
const (
unixIFBLK = 0x6000
unixIFMT = 0xf000
unixIFCHR = 0x2000
unixIFDIR = 0x4000
unixIFIFO = 0x1000
unixIFLNK = 0xa000
unixIFREG = 0x8000
unixIFSOCK = 0xc000
unixISGID = 0x400
unixISUID = 0x800
unixISVTX = 0x200
)
mode := os.FileMode(entryMode & 0777)
switch entryMode & unixIFMT {
case unixIFBLK:
mode |= os.ModeDevice
case unixIFCHR:
mode |= os.ModeDevice | os.ModeCharDevice
case unixIFDIR:
mode |= os.ModeDir
case unixIFIFO:
mode |= os.ModeNamedPipe
case unixIFLNK:
mode |= os.ModeSymlink
case unixIFREG:
// nothing to do
case unixIFSOCK:
mode |= os.ModeSocket
}
if entryMode&unixISGID != 0 {
mode |= os.ModeSetgid
}
if entryMode&unixISUID != 0 {
mode |= os.ModeSetuid
}
if entryMode&unixISVTX != 0 {
mode |= os.ModeSticky
}
return mode
}
type execDevice struct {
*adb.Device
}
func (d *execDevice) execCommandWithExitCode(cmd string, arg string) (string, int, error) {
cmdLine := fmt.Sprintf("sh -c '%s \"$0\"; echo :$?' '%s'", cmd, strings.Replace(arg, "'", "'\\''", -1))
fs.Debugf("adb", "exec: %s", cmdLine)
conn, err := d.execCommand(cmdLine)
if err != nil {
return "", -1, err
}
resp, err := conn.ReadUntilEof()
if err != nil {
return "", -1, errors.Wrap(err, "ExecCommand")
}
outStr := string(resp)
idx := strings.LastIndexByte(outStr, ':')
if idx == -1 {
return outStr, -1, fmt.Errorf("adb shell aborted, can not parse exit code")
}
exitCode, _ := strconv.Atoi(strings.TrimSpace(outStr[idx+1:]))
if exitCode != 0 {
err = adb.ShellExitError{Command: cmdLine, ExitCode: exitCode}
}
return outStr[:idx], exitCode, err
}
func (d *execDevice) execCommand(cmd string, args ...string) (*wire.Conn, error) {
cmd = prepareCommandLineEscaped(cmd, args...)
conn, err := d.Dial()
if err != nil {
return nil, errors.Wrap(err, "ExecCommand")
}
defer func() {
if err != nil && conn != nil {
_ = conn.Close()
}
}()
req := fmt.Sprintf("exec:%s", cmd)
if err = conn.SendMessage([]byte(req)); err != nil {
return nil, errors.Wrap(err, "ExecCommand")
}
if _, err = conn.ReadStatus(req); err != nil {
return nil, errors.Wrap(err, "ExecCommand")
}
return conn, nil
}
func prepareCommandLineEscaped(cmd string, args ...string) string {
for i, arg := range args {
args[i] = fmt.Sprintf("'%s'", strings.Replace(arg, "'", "'\\''", -1))
}
// Prepend the command to the args array.
if len(args) > 0 {
cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))
}
return cmd
}
type adbReader struct {
io.ReadCloser
skip int64
read int64
expected int64
}
func (r *adbReader) Read(b []byte) (n int, err error) {
n, err = r.ReadCloser.Read(b)
if s := r.skip; n > 0 && s > 0 {
_n := int64(n)
if _n <= s {
r.skip -= _n
return r.Read(b)
}
r.skip = 0
copy(b, b[s:n])
n -= int(s)
}
r.read += int64(n)
if err == io.EOF && r.read < r.expected {
fs.Debugf("adb", "Read: read: %d expected: %d n: %d", r.read, r.expected, n)
return n, io.ErrUnexpectedEOF
}
return n, err
}

View File

@@ -1,20 +0,0 @@
// Test ADB filesystem interface
package adb_test
import (
"testing"
"github.com/ncw/rclone/backend/adb"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAdb:/data/local/tmp",
NilObject: (*adb.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: "TestAdb", Key: "copy_links", Value: "true"},
},
})
}

View File

@@ -2,7 +2,6 @@ package all
import (
// Active file systems
_ "github.com/ncw/rclone/backend/adb"
_ "github.com/ncw/rclone/backend/alias"
_ "github.com/ncw/rclone/backend/amazonclouddrive"
_ "github.com/ncw/rclone/backend/azureblob"
@@ -17,6 +16,7 @@ import (
_ "github.com/ncw/rclone/backend/http"
_ "github.com/ncw/rclone/backend/hubic"
_ "github.com/ncw/rclone/backend/jottacloud"
_ "github.com/ncw/rclone/backend/koofr"
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive"

View File

@@ -155,7 +155,7 @@ type Fs struct {
noAuthClient *http.Client // unauthenticated http client
root string // the path we are working on
dirCache *dircache.DirCache // Map of directory path to directory id
-	pacer *pacer.Pacer // pacer for API calls
+	pacer *fs.Pacer // pacer for API calls
trueRootID string // ID of true root directory
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
@@ -273,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
c: c,
-	pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
+	pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
noAuthClient: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{

View File

@@ -144,7 +144,7 @@ type Fs struct {
containerOKMu sync.Mutex // mutex to protect container OK
containerOK bool // true if we have created the container
containerDeleted bool // true if we have deleted the container
-	pacer *pacer.Pacer // To pace and retry the API calls
+	pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
}
@@ -347,7 +347,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
container: container,
root: directory,
-	pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
+	pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
client: fshttp.NewClient(fs.Config),
}
@@ -1386,16 +1386,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
blob := o.getBlobReference()
httpHeaders := azblob.BlobHTTPHeaders{}
httpHeaders.ContentType = fs.MimeType(o)
-	// Multipart upload doesn't support MD5 checksums at put block calls, hence calculate
-	// MD5 only for PutBlob requests
-	if size < int64(o.fs.opt.UploadCutoff) {
-		if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
-			sourceMD5bytes, err := hex.DecodeString(sourceMD5)
-			if err == nil {
-				httpHeaders.ContentMD5 = sourceMD5bytes
-			} else {
-				fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
-			}
-		}
-	}
+	// Compute the Content-MD5 of the file, for multiparts uploads it
+	// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
+	// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
+	// in order to validate its integrity during transport
+	if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
+		sourceMD5bytes, err := hex.DecodeString(sourceMD5)
+		if err == nil {
+			httpHeaders.ContentMD5 = sourceMD5bytes
+		} else {
+			fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
+		}
+	}

View File

@@ -167,7 +167,7 @@ type Fs struct {
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadURLResponse // result of get upload URL calls
authMu sync.Mutex // lock for authorizing the account
-	pacer *pacer.Pacer // To pace and retry the API calls
+	pacer *fs.Pacer // To pace and retry the API calls
bufferTokens chan []byte // control concurrency of multipart uploads
}
@@ -251,13 +251,7 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
}
}
-		retryAfterDuration := time.Duration(retryAfter) * time.Second
-		if f.pacer.GetSleep() < retryAfterDuration {
-			fs.Debugf(f, "Setting sleep to %v after error: %v", retryAfterDuration, err)
-			// We set 1/2 the value here because the pacer will double it immediately
-			f.pacer.SetSleep(retryAfterDuration / 2)
-		}
-		return true, err
+		return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -363,7 +357,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
bucket: bucket,
root: directory,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
-	pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+	pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
ReadMimeType: true,

View File

@@ -111,7 +111,7 @@ type Fs struct {
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
-	pacer *pacer.Pacer // pacer for API calls
+	pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
}
@@ -260,7 +260,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
-	pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+	pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
f.features = (&fs.Features{

View File

@@ -186,10 +186,10 @@ func init() {
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
Help: "Google Application Client Secret\nSetting your own is recommended.",
}, {
Name: "scope",
Help: "Scope that rclone should use when requesting access from drive.",
@@ -426,7 +426,7 @@ type Fs struct {
client *http.Client // authorized client
rootFolderID string // the id of the root folder
dirCache *dircache.DirCache // Map of directory path to directory id
-	pacer *pacer.Pacer // To pace the API calls
+	pacer *fs.Pacer // To pace the API calls
exportExtensions []string // preferred extensions to download docs
importMimeTypes []string // MIME types to convert to docs
isTeamDrive bool // true if this is a team drive
@@ -676,28 +676,33 @@ func isPowerOfTwo(x int64) bool {
}
// add a charset parameter to all text/* MIME types
-func fixMimeType(mimeType string) string {
-	mediaType, param, err := mime.ParseMediaType(mimeType)
+func fixMimeType(mimeTypeIn string) string {
+	if mimeTypeIn == "" {
+		return ""
+	}
+	mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
 	if err != nil {
-		return mimeType
+		return mimeTypeIn
 	}
-	if strings.HasPrefix(mimeType, "text/") && param["charset"] == "" {
+	mimeTypeOut := mimeTypeIn
+	if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
 		param["charset"] = "utf-8"
-		mimeType = mime.FormatMediaType(mediaType, param)
+		mimeTypeOut = mime.FormatMediaType(mediaType, param)
 	}
-	return mimeType
+	if mimeTypeOut == "" {
+		panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
+	}
+	return mimeTypeOut
 }
-func fixMimeTypeMap(m map[string][]string) map[string][]string {
-	for _, v := range m {
+func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
+	out = make(map[string][]string, len(in))
+	for k, v := range in {
 		for i, mt := range v {
-			fixed := fixMimeType(mt)
-			if fixed == "" {
-				panic(errors.Errorf("unable to fix MIME type %q", mt))
-			}
-			v[i] = fixed
+			v[i] = fixMimeType(mt)
 		}
+		out[fixMimeType(k)] = v
 	}
-	return m
+	return out
}
func isInternalMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
@@ -789,8 +794,8 @@ func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
}
// newPacer makes a pacer configured for drive
-func newPacer(opt *Options) *pacer.Pacer {
-	return pacer.New().SetMinSleep(time.Duration(opt.PacerMinSleep)).SetBurst(opt.PacerBurst).SetPacer(pacer.GoogleDrivePacer)
+func newPacer(opt *Options) *fs.Pacer {
+	return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
}
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
@@ -902,6 +907,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
+		ServerSideAcrossConfigs: true,
}).Fill(f)
// Create a new authorized Drive client.
@@ -2430,6 +2436,10 @@ func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (
return req, nil, err
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
+	if o.bytes == 0 {
+		// Don't supply range requests for 0 length objects as they always fail
+		delete(req.Header, "Range")
+	}
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
if err == nil {

View File

@@ -160,7 +160,7 @@ type Fs struct {
team team.Client // for the Teams API
slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase
-	pacer *pacer.Pacer // To pace the API calls
+	pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
}
@@ -209,7 +209,7 @@ func shouldRetry(err error) (bool, error) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
-			time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
+			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
@@ -273,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
opt: *opt,
-	pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+	pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
config := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo

View File

@@ -16,6 +16,7 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 erro
*/
import (
"context"
"encoding/base64"
"encoding/hex"
"fmt"
@@ -45,6 +46,8 @@ import (
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
// NOTE: This API is deprecated
storage "google.golang.org/api/storage/v1"
)
@@ -144,6 +147,22 @@ func init() {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
}},
}, {
Name: "bucket_policy_only",
Help: `Access checks should use bucket-level IAM policies.
If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.
When it is set, rclone:
- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set
Docs: https://cloud.google.com/storage/docs/bucket-policy-only
`,
Default: false,
}, {
Name: "location",
Help: "Location for the newly created buckets.",
@@ -241,6 +260,7 @@ type Options struct {
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
+	BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
}
@@ -256,7 +276,7 @@ type Fs struct {
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
-	pacer *pacer.Pacer // To pace the API calls
+	pacer *fs.Pacer // To pace the API calls
}
// Object describes a storage object
@@ -381,7 +401,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else {
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
}
}
}
@@ -395,7 +419,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
bucket: bucket,
root: directory,
opt: *opt,
-	pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
+	pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -709,8 +733,19 @@ func (f *Fs) Mkdir(dir string) (err error) {
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
+	if f.opt.BucketPolicyOnly {
+		bucket.IamConfiguration = &storage.BucketIamConfiguration{
+			BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
+				Enabled: true,
+			},
+		}
+	}
 	err = f.pacer.Call(func() (bool, error) {
-		_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
+		insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
+		if !f.opt.BucketPolicyOnly {
+			insertBucket.PredefinedAcl(f.opt.BucketACL)
+		}
+		_, err = insertBucket.Do()
return shouldRetry(err)
})
if err == nil {
@@ -976,7 +1011,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
+		insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
+		if !o.fs.opt.BucketPolicyOnly {
+			insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
+		}
+		newObject, err = insertObject.Do()
return shouldRetry(err)
})
if err != nil {

View File

@@ -190,7 +190,7 @@ type Fs struct {
endpointURL string
srv *rest.Client
apiSrv *rest.Client
-	pacer *pacer.Pacer
+	pacer *fs.Pacer
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
@@ -381,6 +381,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
+	// add jottacloud to the long list of sites that don't follow the oauth spec correctly
+	oauth2.RegisterBrokenAuthHeaderProvider("https://www.jottacloud.com/")
// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config)
@@ -403,7 +406,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
-	pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+	pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,

backend/koofr/koofr.go (new file, 589 lines)
View File

@@ -0,0 +1,589 @@
package koofr
import (
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)
// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
},
},
})
}
// Options represent the configuration of the Koofr backend
type Options struct {
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
}
// A Fs is a representation of a remote Koofr Fs
type Fs struct {
name string
mountID string
root string
opt Options
features *fs.Features
client *koofrclient.KoofrClient
}
// An Object on the remote Koofr Fs
type Object struct {
fs *Fs
remote string
info koofrclient.FileInfo
}
func base(pth string) string {
rv := path.Base(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}
func dir(pth string) string {
rv := path.Dir(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}
// String returns a string representation of the remote Object
func (o *Object) String() string {
return o.remote
}
// Remote returns the remote path of the Object, relative to Fs root
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the Object
func (o *Object) ModTime() time.Time {
return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
}
// Size returns the size of the Object in bytes
func (o *Object) Size() int64 {
return o.info.Size
}
// Fs returns a reference to the Koofr Fs containing the Object
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns an MD5 hash of the Object
func (o *Object) Hash(typ hash.Type) (string, error) {
if typ == hash.MD5 {
return o.info.Hash, nil
}
return "", nil
}
// fullPath returns full path of the remote Object (including Fs root)
func (o *Object) fullPath() string {
return o.fs.fullPath(o.remote)
}
// Storable returns true if the Object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime is not supported
func (o *Object) SetModTime(mtime time.Time) error {
return nil
}
// Open opens the Object for reading
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var sOff, eOff int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
sOff = x.Offset
case *fs.RangeOption:
sOff = x.Start
eOff = x.End
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if sOff == 0 && eOff < 0 {
return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
}
if sOff < 0 {
sOff = o.Size() - eOff
eOff = o.Size()
}
if eOff > o.Size() {
eOff = o.Size()
}
span := &koofrclient.FileSpan{
Start: sOff,
End: eOff,
}
return o.fs.client.FilesGetRange(o.fs.mountID, o.fullPath(), span)
}
// Update updates the Object contents
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := o.fullPath()
dirPath := dir(fullPath)
name := base(fullPath)
err := o.fs.mkdir(dirPath)
if err != nil {
return err
}
info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
if err != nil {
return err
}
o.info = *info
return nil
}
// Remove deletes the remote Object
func (o *Object) Remove() error {
return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
}
// Name returns the name of the Fs
func (f *Fs) Name() string {
return f.name
}
// Root returns the root path of the Fs
func (f *Fs) Root() string {
return f.root
}
// String returns a string representation of the Fs
func (f *Fs) String() string {
return "koofr:" + f.mountID + ":" + f.root
}
// Features returns the optional features supported by this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision denotes that setting modification times is not supported
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the set of hashes provided by the Fs
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// fullPath constructs a full, absolute path from a Fs root relative path.
func (f *Fs) fullPath(part string) string {
return path.Join("/", f.root, part)
}
// NewFs constructs a new filesystem given a root path and configuration options
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, err
}
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)
mounts, err := client.Mounts()
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
client: client,
}
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(f)
for _, m := range mounts {
if opt.MountID != "" {
if m.Id == opt.MountID {
f.mountID = m.Id
break
}
} else if m.IsPrimary {
f.mountID = m.Id
break
}
}
if f.mountID == "" {
if opt.MountID == "" {
return nil, errors.New("Failed to find primary mount")
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
} else {
err = nil
}
return f, err
}
// List returns a list of items in a directory
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return nil, translateErrorsDir(err)
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
if file.Type == "dir" {
entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
} else {
entries[i] = &Object{
fs: f,
info: file,
remote: path.Join(dir, file.Name),
}
}
}
return entries, nil
}
// NewObject creates a new remote Object for a given remote path
func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
if err != nil {
return nil, translateErrorsObject(err)
}
if info.Type == "dir" {
return nil, fs.ErrorNotAFile
}
return &Object{
fs: f,
info: info,
remote: remote,
}, nil
}
// Put updates a remote Object
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := f.fullPath(src.Remote())
dirPath := dir(fullPath)
name := base(fullPath)
err = f.mkdir(dirPath)
if err != nil {
return nil, err
}
info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
if err != nil {
return nil, translateErrorsObject(err)
}
return &Object{
fs: f,
info: *info,
remote: src.Remote(),
}, nil
}
// PutStream updates a remote Object with a stream of unknown size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// isBadRequest is a predicate which holds true iff the error returned was
// HTTP status 400
func isBadRequest(err error) bool {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusBadRequest {
return true
}
}
return false
}
// translateErrorsDir translates koofr errors to rclone errors (for a dir
// operation)
func translateErrorsDir(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorDirNotFound
}
}
return err
}
// translateErrorsObject translates Koofr errors to rclone errors (for an object operation)
func translateErrorsObject(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) mkdir(fullPath string) error {
if fullPath == "/" {
return nil
}
info, err := f.client.FilesInfo(f.mountID, fullPath)
if err == nil && info.Type == "dir" {
return nil
}
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
dirs := strings.Split(fullPath, "/")
parent := "/"
for _, part := range dirs {
if part == "" {
continue
}
info, err = f.client.FilesInfo(f.mountID, path.Join(parent, part))
if err != nil || info.Type != "dir" {
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
err = f.client.FilesNewFolder(f.mountID, parent, part)
if err != nil && !isBadRequest(err) {
return err
}
}
parent = path.Join(parent, part)
}
return nil
}
// Mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) Mkdir(dir string) error {
fullPath := f.fullPath(dir)
return f.mkdir(fullPath)
}
// Rmdir removes an (empty) directory at the given remote path
func (f *Fs) Rmdir(dir string) error {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
if len(files) > 0 {
return fs.ErrorDirectoryNotEmpty
}
err = f.client.FilesDelete(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
return nil
}
// Copy copies a remote Object to the given path
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantCopy
}
err = f.client.FilesCopy((src.(*Object)).fs.mountID,
(src.(*Object)).fs.fullPath((src.(*Object)).remote),
f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantCopy
}
return f.NewObject(remote)
}
// Move moves a remote Object to the given path
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj := src.(*Object)
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantMove
}
err = f.client.FilesMove(srcObj.fs.mountID,
srcObj.fs.fullPath(srcObj.remote), f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantMove
}
return f.NewObject(remote)
}
// DirMove moves a remote directory to the given path
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs := src.(*Fs)
srcFullPath := srcFs.fullPath(srcRemote)
dstFullPath := f.fullPath(dstRemote)
if srcFs.mountID == f.mountID && srcFullPath == dstFullPath {
return fs.ErrorDirExists
}
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return fs.ErrorCantDirMove
}
err = f.client.FilesMove(srcFs.mountID, srcFullPath, f.mountID, dstFullPath)
if err != nil {
return fs.ErrorCantDirMove
}
return nil
}
// About reports space usage (with a MB precision)
func (f *Fs) About() (*fs.Usage, error) {
mount, err := f.client.MountsDetails(f.mountID)
if err != nil {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(mount.SpaceTotal * 1024 * 1024),
Used: fs.NewUsageValue(mount.SpaceUsed * 1024 * 1024),
Trashed: nil,
Other: nil,
Free: fs.NewUsageValue((mount.SpaceTotal - mount.SpaceUsed) * 1024 * 1024),
Objects: nil,
}, nil
}
// Purge purges the complete Fs
func (f *Fs) Purge() error {
err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
return err
}
// linkCreate is a Koofr API request for creating a public link
type linkCreate struct {
Path string `json:"path"`
}
// link is a Koofr API response to creating a public link
type link struct {
ID string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
Counter int64 `json:"counter"`
URL string `json:"url"`
ShortURL string `json:"shortUrl"`
Hash string `json:"hash"`
Host string `json:"host"`
HasPassword bool `json:"hasPassword"`
Password string `json:"password"`
ValidFrom int64 `json:"validFrom"`
ValidTo int64 `json:"validTo"`
PasswordRequired bool `json:"passwordRequired"`
}
// createLink makes a Koofr API call to create a public link
func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link, error) {
linkCreate := linkCreate{
Path: path,
}
linkData := link{}
request := httpclient.RequestData{
Method: "POST",
Path: "/api/v2/mounts/" + mountID + "/links",
ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: linkCreate,
RespEncoding: httpclient.EncodingJSON,
RespValue: &linkData,
}
_, err := c.Request(&request)
if err != nil {
return nil, err
}
return &linkData, nil
}
// PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(remote string) (string, error) {
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
if err != nil {
return "", translateErrorsDir(err)
}
return linkData.ShortURL, nil
}
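PublicLink implements rclone's optional public-link feature, which is what the `rclone link` command uses, so once a Koofr remote is configured a shareable URL can be created with, for example (path chosen for illustration):

    rclone link koofr:path/to/file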

View File

@@ -0,0 +1,14 @@
package koofr_test
import (
"testing"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestKoofr:",
})
}
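As with the other backends, the integration tests run against a configured remote matching the `RemoteName` above, for example:

    go test -v -remote TestKoofr: ./backend/koofr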

View File

@@ -98,7 +98,7 @@ type Fs struct {
opt Options // parsed config options
features *fs.Features // optional features
srv *mega.Mega // the connection to the server
pacer *pacer.Pacer // pacer for API calls
pacer *fs.Pacer // pacer for API calls
rootNodeMu sync.Mutex // mutex for _rootNode
_rootNode *mega.Node // root node - call findRoot to use this
mkdirMu sync.Mutex // used to serialize calls to mkdir / rmdir
@@ -217,7 +217,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: srv,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
DuplicateFiles: true,

View File

@@ -261,7 +261,7 @@ type Fs struct {
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // pacer for API calls
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
driveID string // ID to use for querying Microsoft Graph
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
@@ -475,7 +475,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
driveID: opt.DriveID,
driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,

View File

@@ -65,7 +65,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
pacer *pacer.Pacer // To pace and retry the API calls
pacer *fs.Pacer // To pace and retry the API calls
session UserSessionInfo // contains the session data
dirCache *dircache.DirCache // Map of directory path to directory id
}
@@ -144,7 +144,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.dirCache = dircache.New(root, "0", f)

View File

@@ -95,7 +95,7 @@ type Fs struct {
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // pacer for API calls
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
@@ -254,7 +254,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: false,

View File

@@ -782,7 +782,7 @@ type Fs struct {
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
pacer *pacer.Pacer // To pace the API calls
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
}
@@ -1055,7 +1055,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
c: c,
bucket: bucket,
ses: ses,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
srv: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{

View File

@@ -427,6 +427,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}
return NewFsWithConnection(name, root, opt, sshConfig)
}
// NewFsWithConnection creates a new Fs object from the name and root and a ssh.ClientConfig. It connects to
// the host specified in the ssh.ClientConfig
func NewFsWithConnection(name string, root string, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,

View File

@@ -216,7 +216,7 @@ type Fs struct {
containerOK bool // true if we have created the container
segmentsContainer string // container to store the segments (if any) in
noCheckContainer bool // don't check the container before creating it
pacer *pacer.Pacer // To pace the API calls
pacer *fs.Pacer // To pace the API calls
}
// Object describes a swift object
@@ -401,7 +401,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
segmentsContainer: container + "_segments",
root: directory,
noCheckContainer: noCheckContainer,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
}
f.features = (&fs.Features{
ReadMimeType: true,

View File

@@ -101,7 +101,7 @@ type Fs struct {
endpoint *url.URL // URL of the host
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the one drive server
pacer *pacer.Pacer // pacer for API calls
pacer *fs.Pacer // pacer for API calls
precision time.Duration // mod time precision
canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime
@@ -318,7 +318,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
endpoint: u,
endpointURL: u.String(),
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
precision: fs.ModTimeNotSupported,
}
f.features = (&fs.Features{

View File

@@ -93,7 +93,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the yandex server
pacer *pacer.Pacer // pacer for API calls
pacer *fs.Pacer // pacer for API calls
diskRoot string // root path with "disk:/" container name
}
@@ -269,7 +269,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
name: name,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.setRoot(root)
f.features = (&fs.Features{

View File

@@ -17,14 +17,18 @@ import (
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"
"github.com/ncw/rclone/lib/rest"
"golang.org/x/net/html"
"golang.org/x/sys/unix"
)
@@ -33,6 +37,7 @@ var (
install = flag.Bool("install", false, "Install the downloaded package using sudo dpkg -i.")
extract = flag.String("extract", "", "Extract the named executable from the .tar.gz and install into bindir.")
bindir = flag.String("bindir", defaultBinDir(), "Directory to install files downloaded with -extract.")
useAPI = flag.Bool("use-api", false, "Use the API for finding the release instead of scraping the page.")
// Globals
matchProject = regexp.MustCompile(`^([\w-]+)/([\w-]+)$`)
osAliases = map[string][]string{
@@ -209,6 +214,55 @@ func getAsset(project string, matchName *regexp.Regexp) (string, string) {
return "", ""
}
// Get an asset URL and name by scraping the downloads page
//
// This doesn't use the API so it isn't rate limited when GITHUB login details aren't being used
func getAssetFromReleasesPage(project string, matchName *regexp.Regexp) (assetURL string, assetName string) {
baseURL := "https://github.com/" + project + "/releases"
log.Printf("Fetching asset info for %q from %q", project, baseURL)
base, err := url.Parse(baseURL)
if err != nil {
log.Fatalf("URL Parse failed: %v", err)
}
resp, err := http.Get(baseURL)
if err != nil {
log.Fatalf("Failed to fetch release info %q: %v", baseURL, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Printf("Error: %s", readBody(resp.Body))
log.Fatalf("Bad status %d when fetching %q release info: %s", resp.StatusCode, baseURL, resp.Status)
}
doc, err := html.Parse(resp.Body)
if err != nil {
log.Fatalf("Failed to parse web page: %v", err)
}
var walk func(*html.Node)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
if name := path.Base(a.Val); matchName.MatchString(name) && isOurOsArch(name) {
if u, err := rest.URLJoin(base, a.Val); err == nil {
assetName = name
assetURL = u.String()
}
}
break
}
}
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
walk(c)
}
}
walk(doc)
if assetName == "" || assetURL == "" {
log.Fatalf("Didn't find URL in page")
}
return assetURL, assetName
}
// isOurOsArch returns true if s contains our OS and our Arch
func isOurOsArch(s string) bool {
s = strings.ToLower(s)
@@ -346,7 +400,12 @@ func main() {
log.Fatalf("Invalid regexp for name %q: %v", nameRe, err)
}
assetURL, assetName := getAsset(project, matchName)
var assetURL, assetName string
if *useAPI {
assetURL, assetName = getAsset(project, matchName)
} else {
assetURL, assetName = getAssetFromReleasesPage(project, matchName)
}
fileName := filepath.Join(os.TempDir(), assetName)
getFile(assetURL, fileName)

View File

@@ -36,6 +36,7 @@ docs = [
"http.md",
"hubic.md",
"jottacloud.md",
"koofr.md",
"mega.md",
"azureblob.md",
"onedrive.md",

View File

@@ -341,8 +341,7 @@ func initConfig() {
configflags.SetFlags()
// Load filters
var err error
filter.Active, err = filter.NewFilter(&filterflags.Opt)
err := filterflags.Reload()
if err != nil {
log.Fatalf("Failed to load filters: %v", err)
}

View File

@@ -7,8 +7,13 @@ import (
"github.com/spf13/cobra"
)
var (
createEmptySrcDirs = false
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy")
}
var commandDefintion = &cobra.Command{
@@ -69,7 +74,7 @@ changed recently very efficiently like this:
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.CopyDir(fdst, fsrc)
return sync.CopyDir(fdst, fsrc, createEmptySrcDirs)
}
return operations.CopyFile(fdst, fsrc, srcFileName, srcFileName)
})
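In use, the new flag asks copy to recreate empty source directories on the destination, for example (paths chosen for illustration):

    rclone copy --create-empty-src-dirs /path/to/src remote:dst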

View File

@@ -48,7 +48,7 @@ destination.
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.CopyDir(fdst, fsrc)
return sync.CopyDir(fdst, fsrc, false)
}
return operations.CopyFile(fdst, fsrc, dstFileName, srcFileName)
})

View File

@@ -45,7 +45,7 @@ __rclone_custom_func() {
else
__rclone_init_completion -n : || return
fi
if [[ $cur =~ ^[[:alnum:]]*$ ]]; then
if [[ $cur =~ ^[[:alnum:]_]*$ ]]; then
local remote
while IFS= read -r remote; do
[[ $remote != $cur* ]] || COMPREPLY+=("$remote")
@@ -54,7 +54,7 @@ __rclone_custom_func() {
local paths=("$cur"*)
[[ ! -f ${paths[0]} ]] || COMPREPLY+=("${paths[@]}")
fi
elif [[ $cur =~ ^[[:alnum:]]+: ]]; then
elif [[ $cur =~ ^[[:alnum:]_]+: ]]; then
local path=${cur#*:}
if [[ $path == */* ]]; then
local prefix=${path%/*}

View File

@@ -10,7 +10,6 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -67,8 +66,10 @@ output:
s - size
t - modification time
h - hash
i - ID of object if known
i - ID of object
o - Original ID of underlying object
m - MimeType of object if known
e - encrypted name
So if you wanted the path, size and modification time, you would use
--format "pst", or maybe --format "tsp" to put the path last.
@@ -161,6 +162,10 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
list.SetCSV(csv)
list.SetDirSlash(dirSlash)
list.SetAbsolute(absolute)
var opt = operations.ListJSONOpt{
NoModTime: true,
Recurse: recurse,
}
for _, char := range format {
switch char {
@@ -168,38 +173,38 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
list.AddPath()
case 't':
list.AddModTime()
opt.NoModTime = false
case 's':
list.AddSize()
case 'h':
list.AddHash(hashType)
opt.ShowHash = true
case 'i':
list.AddID()
case 'm':
list.AddMimeType()
case 'e':
list.AddEncrypted()
opt.ShowEncrypted = true
case 'o':
list.AddOrigID()
opt.ShowOrigIDs = true
default:
return errors.Errorf("Unknown format character %q", char)
}
}
return walk.Walk(fsrc, "", false, operations.ConfigMaxDepth(recurse), func(path string, entries fs.DirEntries, err error) error {
if err != nil {
fs.CountError(err)
fs.Errorf(path, "error listing: %v", err)
return nil
}
for _, entry := range entries {
_, isDir := entry.(fs.Directory)
if isDir {
if filesOnly {
continue
}
} else {
if dirsOnly {
continue
}
return operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error {
if item.IsDir {
if filesOnly {
return nil
}
} else {
if dirsOnly {
return nil
}
_, _ = fmt.Fprintln(out, list.Format(entry))
}
_, _ = fmt.Fprintln(out, list.Format(item))
return nil
})
}

View File

@@ -10,11 +10,13 @@ import (
// Globals
var (
deleteEmptySrcDirs = false
createEmptySrcDirs = false
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move")
}
var commandDefintion = &cobra.Command{
@@ -52,7 +54,7 @@ can speed transfers up greatly.
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs)
return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs, createEmptySrcDirs)
}
return operations.MoveFile(fdst, fsrc, srcFileName, srcFileName)
})

View File

@@ -52,7 +52,7 @@ transfer.
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.MoveDir(fdst, fsrc, false)
return sync.MoveDir(fdst, fsrc, false, false)
}
return operations.MoveFile(fdst, fsrc, dstFileName, srcFileName)
})

View File

@@ -10,6 +10,7 @@ import (
"sort"
"strings"
runewidth "github.com/mattn/go-runewidth"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/ncdu/scan"
"github.com/ncw/rclone/fs"
@@ -122,7 +123,7 @@ func Printf(x, y int, fg, bg termbox.Attribute, format string, args ...interface
func Line(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, msg string) {
for _, c := range msg {
termbox.SetCell(x, y, c, fg, bg)
x++
x += runewidth.RuneWidth(c)
if x >= xmax {
return
}

View File

@@ -6,8 +6,13 @@ import (
"github.com/spf13/cobra"
)
var (
createEmptySrcDirs = false
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after sync")
}
var commandDefintion = &cobra.Command{
@@ -39,7 +44,7 @@ go there.
cmd.CheckArgs(2, 2, command, args)
fsrc, fdst := cmd.NewFsSrcDst(args)
cmd.Run(true, true, command, func() error {
return sync.Sync(fdst, fsrc)
return sync.Sync(fdst, fsrc, createEmptySrcDirs)
})
},
}

View File

@@ -29,6 +29,7 @@ Rclone is a command line program to sync files and directories to and from:
* {{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
* {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
* {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
* {{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
* {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
* {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
* {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}

View File

@@ -237,3 +237,11 @@ Contributors
* Jonathan <vanillajonathan@users.noreply.github.com>
* James Carpenter <orbsmiv@users.noreply.github.com>
* Vince <vince0villamora@gmail.com>
* Nestar47 <47841759+Nestar47@users.noreply.github.com>
* Six <brbsix@gmail.com>
* Alexandru Bumbacea <alexandru.bumbacea@booking.com>
* calisro <robert.calistri@gmail.com>
* Dr.Rx <david.rey@nventive.com>
* marcintustin <marcintustin@users.noreply.github.com>
* jaKa Močnik <jaka@koofr.net>
* Fionera <fionera@fionera.de>

View File

@@ -1,7 +1,7 @@
---
title: "Documentation"
description: "Rclone Usage"
date: "2015-06-06"
date: "2019-02-25"
---
Configure
@@ -34,6 +34,7 @@ See the following for detailed instructions for
* [HTTP](/http/)
* [Hubic](/hubic/)
* [Jottacloud](/jottacloud/)
* [Koofr](/koofr/)
* [Mega](/mega/)
* [Microsoft Azure Blob Storage](/azureblob/)
* [Microsoft OneDrive](/onedrive/)
@@ -98,7 +99,7 @@ The main rclone commands with most used first
* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output shell completion scripts for rclone.
* [rclone gendocs](/commands/rclone_gendocs/) - Output markdown docs for rclone to the directory supplied.
* [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file.
* [rclone mount](/commands/rclone_mount/) - Mount the remote as a mountpoint. **EXPERIMENTAL**
* [rclone mount](/commands/rclone_mount/) - Mount the remote as a mountpoint.
* [rclone moveto](/commands/rclone_moveto/) - Move file or directory from source to dest.
* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone.conf
* [rclone cryptcheck](/commands/rclone_cryptcheck/) - Check the integrity of a crypted remote.
@@ -170,11 +171,24 @@ should be the name or prefix of a backend (the `type` in the config
file) and all the configuration for the backend should be provided on
the command line (or in environment variables).
Eg
Here are some examples:
rclone lsd --http-url https://pub.rclone.org :http:
Which lists all the directories in `pub.rclone.org`.
To list all the directories in the root of `https://pub.rclone.org/`.
rclone lsf --http-url https://example.com :http:path/to/dir
To list files and directories in `https://example.com/path/to/dir/`
rclone copy --http-url https://example.com :http:path/to/dir /tmp/dir
To copy files and directories in `https://example.com/path/to/dir` to `/tmp/dir`.
rclone copy --sftp-host example.com :sftp:path/to/dir /tmp/dir
To copy files and directories from `example.com` in the relative
directory `path/to/dir` to `/tmp/dir` using sftp.
Quoting and the shell
---------------------

View File

@@ -854,6 +854,15 @@ The most likely cause of this is the duplicated file issue above - run
`rclone dedupe` and check your logs for duplicate object or directory
messages.
This can also be caused by a delay/caching on Google Drive's end when
comparing directory listings, specifically with Team Drives used in
combination with --fast-list. Files that were uploaded recently may
not appear in the directory listing sent to rclone when using
--fast-list. Waiting a moderate period of time between attempts
(estimated to be approximately 1 hour) and/or not using --fast-list
both seem to be effective in preventing the problem.
### Making your own client_id ###
When you use rclone with Google drive in its default configuration you

View File

@@ -188,3 +188,10 @@ causes not all domains to be resolved properly.
Additionally with the `GODEBUG=netdns=` environment variable the Go
resolver decision can be influenced. This also allows to resolve certain
issues with DNS resolution. See the [name resolution section in the go docs](https://golang.org/pkg/net/#hdr-Name_Resolution).
### The total size reported in the stats for a sync is wrong and keeps changing
It is likely you have more than 10,000 files that need to be
synced. By default rclone only gets 10,000 files ahead in a sync so as
not to use up too much memory. You can change this default with the
[--max-backlog](/docs/#max-backlog-n) flag.
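For example, to allow rclone to queue up to 200,000 files ahead (a value chosen purely for illustration):

    rclone sync --max-backlog 200000 source:path dest:path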

View File

@@ -217,6 +217,20 @@ the rclone config file, you can set `service_account_credentials` with
the actual contents of the file instead, or set the equivalent
environment variable.
### Application Default Credentials ###
If no other source of credentials is provided, rclone will fall back
to
[Application Default Credentials](https://cloud.google.com/video-intelligence/docs/common/auth#authenticating_with_application_default_credentials).
This is useful both when you have already configured authentication
for your developer account and in production when running on a Google
Compute host. Note that if running in docker, you may need to run
additional commands on your Google Compute machine -
[see this page](https://cloud.google.com/container-registry/docs/advanced-authentication#gcloud_as_a_docker_credential_helper).
Note that when application default credentials are used, there is no
need to explicitly configure a project number.
### --fast-list ###
This remote supports `--fast-list` which allows you to use fewer
@@ -328,6 +342,27 @@ Access Control List for new buckets.
- "publicReadWrite"
- Project team owners get OWNER access, and all Users get WRITER access.
#### --gcs-bucket-policy-only
Access checks should use bucket-level IAM policies.
If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.
When it is set, rclone:
- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set
Docs: https://cloud.google.com/storage/docs/bucket-policy-only
- Config: bucket_policy_only
- Env Var: RCLONE_GCS_BUCKET_POLICY_ONLY
- Type: bool
- Default: false
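For example, to upload to a Bucket Policy Only bucket (remote and bucket names assumed):

    rclone copy /path/to/files gcs:bucket --gcs-bucket-policy-only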
#### --gcs-location
Location for the newly created buckets.

docs/content/koofr.md Normal file
View File

@@ -0,0 +1,189 @@
---
title: "Koofr"
description: "Rclone docs for Koofr"
date: "2019-02-25"
---
<i class="fa fa-suitcase"></i> Koofr
-----------------------------------------
Paths are specified as `remote:path`
Paths may be as deep as required, eg `remote:directory/subdirectory`.
The initial setup for Koofr involves creating an application password for
rclone. You can do that by opening the Koofr
[web application](https://app.koofr.net/app/admin/preferences/password),
giving the password a nice name like `rclone` and clicking on generate.
Here is an example of how to make a remote called `koofr`. First run:
rclone config
This will guide you through an interactive setup process:
```
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> koofr
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / A stackable unification remote, which can appear to merge the contents of several remotes
\ "union"
2 / Alias for a existing remote
\ "alias"
3 / Amazon Drive
\ "amazon cloud drive"
4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)
\ "s3"
5 / Backblaze B2
\ "b2"
6 / Box
\ "box"
7 / Cache a remote
\ "cache"
8 / Dropbox
\ "dropbox"
9 / Encrypt/Decrypt a remote
\ "crypt"
10 / FTP Connection
\ "ftp"
11 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
12 / Google Drive
\ "drive"
13 / Hubic
\ "hubic"
14 / JottaCloud
\ "jottacloud"
15 / Koofr
\ "koofr"
16 / Local Disk
\ "local"
17 / Mega
\ "mega"
18 / Microsoft Azure Blob Storage
\ "azureblob"
19 / Microsoft OneDrive
\ "onedrive"
20 / OpenDrive
\ "opendrive"
21 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
22 / Pcloud
\ "pcloud"
23 / QingCloud Object Storage
\ "qingstor"
24 / SSH/SFTP Connection
\ "sftp"
25 / Webdav
\ "webdav"
26 / Yandex Disk
\ "yandex"
27 / http Connection
\ "http"
Storage> koofr
** See help for koofr backend at: https://rclone.org/koofr/ **
Your Koofr user name
Enter a string value. Press Enter for the default ("").
user> USER@NAME
Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)
y) Yes type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:
Edit advanced config? (y/n)
y) Yes
n) No
y/n> n
Remote config
--------------------
[koofr]
type = koofr
baseurl = https://app.koofr.net
user = USER@NAME
password = *** ENCRYPTED ***
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
You can choose to edit advanced config in order to enter your own service URL
if you use an on-premise or white label Koofr instance, or choose an alternative
mount instead of your primary storage.
Once configured you can then use `rclone` like this,
List directories in top level of your Koofr
rclone lsd koofr:
List all the files in your Koofr
rclone ls koofr:
To copy a local directory to a Koofr directory called backup
rclone copy /home/source remote:backup
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/koofr/koofr.go then run make backenddocs -->
### Standard Options
Here are the standard options specific to koofr (Koofr).
#### --koofr-user
Your Koofr user name
- Config: user
- Env Var: RCLONE_KOOFR_USER
- Type: string
- Default: ""
#### --koofr-password
Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)
- Config: password
- Env Var: RCLONE_KOOFR_PASSWORD
- Type: string
- Default: ""
### Advanced Options
Here are the advanced options specific to koofr (Koofr).
#### --koofr-baseurl
Base URL of the Koofr API to connect to
- Config: baseurl
- Env Var: RCLONE_KOOFR_BASEURL
- Type: string
- Default: "https://app.koofr.net"
#### --koofr-mountid
Mount ID of the mount to use. If omitted, the primary mount is used.
- Config: mountid
- Env Var: RCLONE_KOOFR_MOUNTID
- Type: string
- Default: ""
<!--- autogenerated options stop -->
### Limitations ###
Note that Koofr is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".

View File

@@ -2,7 +2,7 @@
title: "Overview of cloud storage systems"
description: "Overview of cloud storage systems"
type: page
date: "2015-09-06"
date: "2019-02-25"
---
# Overview of cloud storage systems #
@@ -28,6 +28,7 @@ Here is an overview of the major features of each cloud storage system.
| HTTP | - | No | No | No | R |
| Hubic | MD5 | Yes | No | No | R/W |
| Jottacloud | MD5 | Yes | Yes | No | R/W |
| Koofr | MD5 | No | Yes | No | - |
| Mega | - | No | No | Yes | - |
| Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W |
| Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R |

View File

@@ -1112,6 +1112,11 @@ server_side_encryption =
storage_class =
```
If you are using an older version of CEPH, eg 10.2.x Jewel, then you
may need to supply the parameter `--s3-upload-cutoff 0` or put this in
the config file as `upload_cutoff 0` to work around a bug which causes
uploading of small files to fail.
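For example (bucket name assumed):

    rclone copy --s3-upload-cutoff 0 /path/to/files s3:bucket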
Note also that Ceph sometimes puts `/` in the passwords it gives
users. If you read the secret access key using the command line tools
you will get a JSON blob with the `/` escaped as `\/`. Make sure you

View File

@@ -67,6 +67,7 @@
<li><a href="/http/"><i class="fa fa-globe"></i> HTTP</a></li>
<li><a href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a></li>
<li><a href="/jottacloud/"><i class="fa fa-cloud"></i> Jottacloud</a></li>
<li><a href="/koofr/"><i class="fa fa-suitcase"></i> Koofr</a></li>
<li><a href="/mega/"><i class="fa fa-archive"></i> Mega</a></li>
<li><a href="/azureblob/"><i class="fa fa-windows"></i> Microsoft Azure Blob Storage</a></li>
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft OneDrive</a></li>

View File

@@ -13,9 +13,15 @@ var (
Opt = filter.DefaultOpt
)
// Reload the filters from the flags
func Reload() (err error) {
filter.Active, err = filter.NewFilter(&Opt)
return err
}
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("filter", &Opt)
rc.AddOptionReload("filter", &Opt, Reload)
flags.BoolVarP(flagSet, &Opt.DeleteExcluded, "delete-excluded", "", false, "Delete files on dest excluded from sync")
flags.StringArrayVarP(flagSet, &Opt.FilterRule, "filter", "f", nil, "Add a file-filtering rule")
flags.StringArrayVarP(flagSet, &Opt.FilterFrom, "filter-from", "", nil, "Read filtering patterns from a file")

View File

@@ -16,8 +16,10 @@ import (
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
)
@@ -59,7 +61,7 @@ var (
ErrorNotAFile = errors.New("is not a regular file")
ErrorNotDeleting = errors.New("not deleting files as there were IO errors")
ErrorNotDeletingDirs = errors.New("not deleting directories as there were IO errors")
ErrorCantMoveOverlapping = errors.New("can't move files on overlapping remotes")
ErrorOverlapping = errors.New("can't sync or move files on overlapping remotes")
ErrorDirectoryNotEmpty = errors.New("directory not empty")
ErrorImmutableModified = errors.New("immutable file modified")
ErrorPermissionDenied = errors.New("permission denied")
@@ -407,6 +409,7 @@ type Features struct {
BucketBased bool // is bucket based (like s3, swift etc)
SetTier bool // allows set tier functionality on objects
GetTier bool // allows to retrieve storage tier of objects
ServerSideAcrossConfigs bool // can server side copy between different remotes of the same type
// Purge all files in the root and the root directory
//
@@ -1112,3 +1115,81 @@ func GetModifyWindow(fss ...Info) time.Duration {
}
return window
}
// Pacer is a simple wrapper around a pacer.Pacer with logging.
type Pacer struct {
*pacer.Pacer
}
type logCalculator struct {
pacer.Calculator
}
// NewPacer creates a Pacer for the given Fs and Calculator.
func NewPacer(c pacer.Calculator) *Pacer {
p := &Pacer{
Pacer: pacer.New(
pacer.InvokerOption(pacerInvoker),
pacer.MaxConnectionsOption(Config.Checkers+Config.Transfers),
pacer.RetriesOption(Config.LowLevelRetries),
pacer.CalculatorOption(c),
),
}
p.SetCalculator(c)
return p
}
func (d *logCalculator) Calculate(state pacer.State) time.Duration {
oldSleepTime := state.SleepTime
newSleepTime := d.Calculator.Calculate(state)
if state.ConsecutiveRetries > 0 {
if newSleepTime != oldSleepTime {
Debugf("pacer", "Rate limited, increasing sleep to %v", newSleepTime)
}
} else {
if newSleepTime != oldSleepTime {
Debugf("pacer", "Reducing sleep to %v", newSleepTime)
}
}
return newSleepTime
}
// SetCalculator sets the pacing algorithm. Don't modify the Calculator object
// afterwards; use the ModifyCalculator method when needed.
//
// It will choose the default algorithm if nil is passed in.
func (p *Pacer) SetCalculator(c pacer.Calculator) {
switch c.(type) {
case *logCalculator:
Logf("pacer", "Invalid Calculator in fs.Pacer.SetCalculator")
case nil:
c = &logCalculator{pacer.NewDefault()}
default:
c = &logCalculator{c}
}
p.Pacer.SetCalculator(c)
}
// ModifyCalculator calls the given function with the currently configured
// Calculator and the Pacer lock held.
func (p *Pacer) ModifyCalculator(f func(pacer.Calculator)) {
p.Pacer.ModifyCalculator(func(c pacer.Calculator) {
switch _c := c.(type) {
case *logCalculator:
f(_c.Calculator)
default:
Logf("pacer", "Invalid Calculator in fs.Pacer: %t", c)
f(c)
}
})
}
func pacerInvoker(try, retries int, f pacer.Paced) (retry bool, err error) {
retry, err = f()
if retry {
Debugf("pacer", "low level retry %d/%d (error %v)", try, retries, err)
err = fserrors.RetryError(err)
}
return
}
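As a sketch of how the converted backends drive this wrapper - the API call, its failure mode and the retry policy below are invented for illustration:

```
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/lib/pacer"
)

var calls int

// flakyAPICall stands in for a backend API call; it fails twice
// before succeeding so the pacer has something to retry.
func flakyAPICall() error {
	calls++
	if calls < 3 {
		return errors.New("throttled")
	}
	return nil
}

func main() {
	// Same construction as the converted backends above.
	p := fs.NewPacer(pacer.NewDefault(
		pacer.MinSleep(10*time.Millisecond),
		pacer.MaxSleep(2*time.Second),
		pacer.DecayConstant(2),
	))
	// Call keeps invoking the function while it returns (true, err),
	// sleeping between attempts and logging sleep-time changes.
	err := p.Call(func() (bool, error) {
		err := flakyAPICall()
		return err != nil, err // retry on any error (illustration only)
	})
	fmt.Printf("made %d calls, final error: %v\n", calls, err)
}
```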

View File

@@ -2,8 +2,15 @@ package fs
import (
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
)
@@ -70,3 +77,47 @@ func TestOption(t *testing.T) {
err = d.Set("sdfsdf")
assert.Error(t, err)
}
var errFoo = errors.New("foo")
type dummyPaced struct {
retry bool
called int
wait *sync.Cond
}
func (dp *dummyPaced) fn() (bool, error) {
if dp.wait != nil {
dp.wait.L.Lock()
dp.wait.Wait()
dp.wait.L.Unlock()
}
dp.called++
return dp.retry, errFoo
}
func TestPacerCall(t *testing.T) {
expectedCalled := Config.LowLevelRetries
if expectedCalled == 0 {
expectedCalled = 20
Config.LowLevelRetries = expectedCalled
defer func() {
Config.LowLevelRetries = 0
}()
}
p := NewPacer(pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
dp := &dummyPaced{retry: true}
err := p.Call(dp.fn)
require.Equal(t, expectedCalled, dp.called)
require.Implements(t, (*fserrors.Retrier)(nil), err)
}
func TestPacerCallNoRetry(t *testing.T) {
p := NewPacer(pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
dp := &dummyPaced{retry: true}
err := p.CallNoRetry(dp.fn)
require.Equal(t, 1, dp.called)
require.Implements(t, (*fserrors.Retrier)(nil), err)
}

View File

@@ -194,7 +194,7 @@ func Cause(cause error) (retriable bool, err error) {
// this case.
err = prev
}
if err == prev {
if reflect.DeepEqual(err, prev) {
// Unpack any struct or *struct with a field
// of name Err which satisfies the error
// interface. This includes *url.Error,
@@ -215,7 +215,7 @@ func Cause(cause error) (retriable bool, err error) {
}
}
}
if err == prev {
if reflect.DeepEqual(err, prev) {
break
}
}

View File

@@ -10,6 +10,7 @@ import (
"io/ioutil"
"net/http"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
@@ -272,7 +273,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec
// Try server side copy first - if has optional interface and
// is same underlying remote
actionTaken = "Copied (server side copy)"
if doCopy := f.Features().Copy; doCopy != nil && SameConfig(src.Fs(), f) {
if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
newDst, err = doCopy(src, remote)
if err == nil {
dst = newDst
@@ -391,7 +392,7 @@ func Move(fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Ob
return newDst, nil
}
// See if we have Move available
if doMove := fdst.Features().Move; doMove != nil && SameConfig(src.Fs(), fdst) {
if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
// Delete destination if it exists
if dst != nil {
err = DeleteFile(dst)
@@ -524,6 +525,11 @@ func DeleteFiles(toBeDeleted fs.ObjectsChan) error {
return DeleteFilesWithBackupDir(toBeDeleted, nil)
}
// SameRemoteType returns true if fdst and fsrc are the same type
func SameRemoteType(fdst, fsrc fs.Info) bool {
return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc)
}
// SameConfig returns true if fdst and fsrc are using the same config
// file entry
func SameConfig(fdst, fsrc fs.Info) bool {
@@ -532,7 +538,7 @@ func SameConfig(fdst, fsrc fs.Info) bool {
// Same returns true if fdst and fsrc point to the same underlying Fs
func Same(fdst, fsrc fs.Info) bool {
return SameConfig(fdst, fsrc) && fdst.Root() == fsrc.Root()
return SameConfig(fdst, fsrc) && strings.Trim(fdst.Root(), "/") == strings.Trim(fsrc.Root(), "/")
}
// Overlapping returns true if fdst and fsrc point to the same
@@ -543,7 +549,7 @@ func Overlapping(fdst, fsrc fs.Info) bool {
}
// Return the Root with a trailing / if not empty
fixedRoot := func(f fs.Info) string {
s := strings.Trim(f.Root(), "/")
s := strings.Trim(filepath.ToSlash(f.Root()), "/")
if s != "" {
s += "/"
}
@@ -1479,8 +1485,7 @@ type ListFormat struct {
separator string
dirSlash bool
absolute bool
output []func() string
entry fs.DirEntry
output []func(entry *ListJSONItem) string
csv *csv.Writer
buf bytes.Buffer
}
@@ -1516,76 +1521,91 @@ func (l *ListFormat) SetCSV(useCSV bool) {
}
// SetOutput sets functions used to create files information
func (l *ListFormat) SetOutput(output []func() string) {
func (l *ListFormat) SetOutput(output []func(entry *ListJSONItem) string) {
l.output = output
}
// AddModTime adds file's Mod Time to output
func (l *ListFormat) AddModTime() {
l.AppendOutput(func() string { return l.entry.ModTime().Local().Format("2006-01-02 15:04:05") })
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.ModTime.When.Local().Format("2006-01-02 15:04:05")
})
}
// AddSize adds file's size to output
func (l *ListFormat) AddSize() {
l.AppendOutput(func() string {
return strconv.FormatInt(l.entry.Size(), 10)
l.AppendOutput(func(entry *ListJSONItem) string {
return strconv.FormatInt(entry.Size, 10)
})
}
// normalisePath makes sure the path has the correct slashes for the current mode
func (l *ListFormat) normalisePath(entry *ListJSONItem, remote string) string {
if l.absolute && !strings.HasPrefix(remote, "/") {
remote = "/" + remote
}
if entry.IsDir && l.dirSlash {
remote += "/"
}
return remote
}
// AddPath adds path to file to output
func (l *ListFormat) AddPath() {
l.AppendOutput(func() string {
remote := l.entry.Remote()
if l.absolute && !strings.HasPrefix(remote, "/") {
remote = "/" + remote
}
_, isDir := l.entry.(fs.Directory)
if isDir && l.dirSlash {
remote += "/"
}
return remote
l.AppendOutput(func(entry *ListJSONItem) string {
return l.normalisePath(entry, entry.Path)
})
}
// AddEncrypted adds the encrypted path to file to output
func (l *ListFormat) AddEncrypted() {
l.AppendOutput(func(entry *ListJSONItem) string {
return l.normalisePath(entry, entry.Encrypted)
})
}
// AddHash adds the hash of the type given to the output
func (l *ListFormat) AddHash(ht hash.Type) {
l.AppendOutput(func() string {
o, ok := l.entry.(fs.Object)
if !ok {
hashName := ht.String()
l.AppendOutput(func(entry *ListJSONItem) string {
if entry.IsDir {
return ""
}
return hashSum(ht, o)
return entry.Hashes[hashName]
})
}
// AddID adds file's ID to the output if known
func (l *ListFormat) AddID() {
l.AppendOutput(func() string {
if do, ok := l.entry.(fs.IDer); ok {
return do.ID()
}
return ""
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.ID
})
}
// AddOrigID adds file's Original ID to the output if known
func (l *ListFormat) AddOrigID() {
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.OrigID
})
}
// AddMimeType adds file's MimeType to the output if known
func (l *ListFormat) AddMimeType() {
l.AppendOutput(func() string {
return fs.MimeTypeDirEntry(l.entry)
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.MimeType
})
}
// AppendOutput adds string generated by specific function to printed output
func (l *ListFormat) AppendOutput(functionToAppend func() string) {
func (l *ListFormat) AppendOutput(functionToAppend func(item *ListJSONItem) string) {
l.output = append(l.output, functionToAppend)
}
// Format prints information about the DirEntry in the format defined
func (l *ListFormat) Format(entry fs.DirEntry) (result string) {
l.entry = entry
func (l *ListFormat) Format(entry *ListJSONItem) (result string) {
var out []string
for _, fun := range l.output {
out = append(out, fun())
out = append(out, fun(entry))
}
if l.csv != nil {
l.buf.Reset()

View File

@@ -39,7 +39,6 @@ import (
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/list"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
@@ -778,6 +777,7 @@ func TestSame(t *testing.T) {
func TestOverlapping(t *testing.T) {
a := &testFsInfo{name: "name", root: "root"}
slash := string(os.PathSeparator) // native path separator
for _, test := range []struct {
name string
root string
@@ -790,6 +790,8 @@ func TestOverlapping(t *testing.T) {
{"name", "roo", false},
{"name", "root/toot", true},
{"name", "root/toot/", true},
{"name", "root" + slash + "toot", true},
{"name", "root" + slash + "toot" + slash, true},
{"name", "", true},
{"name", "/", true},
} {
@@ -873,61 +875,90 @@ func TestCheckEqualReaders(t *testing.T) {
}
func TestListFormat(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteObject("a", "a", t1)
file2 := r.WriteObject("subdir/b", "b", t1)
item0 := &operations.ListJSONItem{
Path: "a",
Name: "a",
Encrypted: "encryptedFileName",
Size: 1,
MimeType: "application/octet-stream",
ModTime: operations.Timestamp{
When: t1,
Format: "2006-01-02T15:04:05.000000000Z07:00"},
IsDir: false,
Hashes: map[string]string{
"MD5": "0cc175b9c0f1b6a831c399e269772661",
"SHA-1": "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8",
"DropboxHash": "bf5d3affb73efd2ec6c36ad3112dd933efed63c4e1cbffcfa88e2759c144f2d8",
"QuickXorHash": "6100000000000000000000000100000000000000"},
ID: "fileID",
OrigID: "fileOrigID",
}
fstest.CheckItems(t, r.Fremote, file1, file2)
item1 := &operations.ListJSONItem{
Path: "subdir",
Name: "subdir",
Encrypted: "encryptedDirName",
Size: -1,
MimeType: "inode/directory",
ModTime: operations.Timestamp{
When: t2,
Format: "2006-01-02T15:04:05.000000000Z07:00"},
IsDir: true,
Hashes: map[string]string(nil),
ID: "dirID",
OrigID: "dirOrigID",
}
items, _ := list.DirSorted(r.Fremote, true, "")
var list operations.ListFormat
list.AddPath()
list.SetDirSlash(false)
assert.Equal(t, "subdir", list.Format(items[1]))
assert.Equal(t, "subdir", list.Format(item1))
list.SetDirSlash(true)
assert.Equal(t, "subdir/", list.Format(items[1]))
assert.Equal(t, "subdir/", list.Format(item1))
list.SetOutput(nil)
assert.Equal(t, "", list.Format(items[1]))
assert.Equal(t, "", list.Format(item1))
list.AppendOutput(func() string { return "a" })
list.AppendOutput(func() string { return "b" })
assert.Equal(t, "ab", list.Format(items[1]))
list.AppendOutput(func(item *operations.ListJSONItem) string { return "a" })
list.AppendOutput(func(item *operations.ListJSONItem) string { return "b" })
assert.Equal(t, "ab", list.Format(item1))
list.SetSeparator(":::")
assert.Equal(t, "a:::b", list.Format(items[1]))
assert.Equal(t, "a:::b", list.Format(item1))
list.SetOutput(nil)
list.AddModTime()
assert.Equal(t, items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0]))
assert.Equal(t, t1.Local().Format("2006-01-02 15:04:05"), list.Format(item0))
list.SetOutput(nil)
list.SetSeparator("|")
list.AddID()
_ = list.Format(items[0]) // Can't really check anything - at least it didn't panic!
list.AddOrigID()
assert.Equal(t, "fileID|fileOrigID", list.Format(item0))
assert.Equal(t, "dirID|dirOrigID", list.Format(item1))
list.SetOutput(nil)
list.AddMimeType()
assert.Contains(t, list.Format(items[0]), "/")
assert.Equal(t, "inode/directory", list.Format(items[1]))
assert.Contains(t, list.Format(item0), "/")
assert.Equal(t, "inode/directory", list.Format(item1))
list.SetOutput(nil)
list.AddPath()
list.SetAbsolute(true)
assert.Equal(t, "/a", list.Format(items[0]))
assert.Equal(t, "/a", list.Format(item0))
list.SetAbsolute(false)
assert.Equal(t, "a", list.Format(items[0]))
assert.Equal(t, "a", list.Format(item0))
list.SetOutput(nil)
list.AddSize()
assert.Equal(t, "1", list.Format(items[0]))
assert.Equal(t, "1", list.Format(item0))
list.AddPath()
list.AddModTime()
list.SetDirSlash(true)
list.SetSeparator("__SEP__")
assert.Equal(t, "1__SEP__a__SEP__"+items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0]))
assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"__SEP__subdir/__SEP__"+items[1].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[1]))
assert.Equal(t, "1__SEP__a__SEP__"+t1.Local().Format("2006-01-02 15:04:05"), list.Format(item0))
assert.Equal(t, "-1__SEP__subdir/__SEP__"+t2.Local().Format("2006-01-02 15:04:05"), list.Format(item1))
for _, test := range []struct {
ht hash.Type
@@ -939,10 +970,7 @@ func TestListFormat(t *testing.T) {
} {
list.SetOutput(nil)
list.AddHash(test.ht)
got := list.Format(items[0])
if got != "UNSUPPORTED" && got != "" {
assert.Equal(t, test.want, got)
}
assert.Equal(t, test.want, list.Format(item0))
}
list.SetOutput(nil)
@@ -952,8 +980,15 @@ func TestListFormat(t *testing.T) {
list.AddPath()
list.AddModTime()
list.SetDirSlash(true)
assert.Equal(t, "1|a|"+items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0]))
assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"|subdir/|"+items[1].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[1]))
assert.Equal(t, "1|a|"+t1.Local().Format("2006-01-02 15:04:05"), list.Format(item0))
assert.Equal(t, "-1|subdir/|"+t2.Local().Format("2006-01-02 15:04:05"), list.Format(item1))
list.SetOutput(nil)
list.SetSeparator("|")
list.AddPath()
list.AddEncrypted()
assert.Equal(t, "a|encryptedFileName", list.Format(item0))
assert.Equal(t, "subdir/|encryptedDirName/", list.Format(item1))
}

View File

@@ -8,13 +8,23 @@ import (
"github.com/pkg/errors"
)
var optionBlock = map[string]interface{}{}
var (
optionBlock = map[string]interface{}{}
optionReload = map[string]func() error{}
)
// AddOption adds an option set
func AddOption(name string, option interface{}) {
optionBlock[name] = option
}
// AddOptionReload adds an option set with a reload function to be
// called when options are changed
func AddOptionReload(name string, option interface{}, reload func() error) {
optionBlock[name] = option
optionReload[name] = reload
}
func init() {
Add(Call{
Path: "options/blocks",
@@ -103,7 +113,12 @@ func rcOptionsSet(in Params) (out Params, err error) {
if err != nil {
return nil, errors.Wrapf(err, "failed to write options from block %q", name)
}
if reload := optionReload[name]; reload != nil {
err = reload()
if err != nil {
return nil, errors.Wrapf(err, "failed to reload options from block %q", name)
}
}
}
return out, nil
}
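A minimal sketch of registering an options block through the new API - the block name, struct and hook body are invented for illustration:

```
package main

import (
	"fmt"

	"github.com/ncw/rclone/fs/rc"
)

// exampleOptions is a hypothetical options block.
type exampleOptions struct {
	Speed int `json:"speed"`
}

var exampleOpt = exampleOptions{Speed: 1}

func main() {
	// Register the block so options/get and options/set can see it;
	// the reload hook runs after each successful options/set write.
	rc.AddOptionReload("example", &exampleOpt, func() error {
		fmt.Println("options changed, speed is now", exampleOpt.Speed)
		return nil
	})
}
```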

View File

@@ -1,8 +1,10 @@
package rc
import (
"fmt"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -24,9 +26,21 @@ func TestAddOption(t *testing.T) {
assert.Equal(t, len(optionBlock), 0)
AddOption("potato", &testOptions)
assert.Equal(t, len(optionBlock), 1)
assert.Equal(t, len(optionReload), 0)
assert.Equal(t, &testOptions, optionBlock["potato"])
}
func TestAddOptionReload(t *testing.T) {
defer clearOptionBlock()
assert.Equal(t, len(optionBlock), 0)
reload := func() error { return nil }
AddOptionReload("potato", &testOptions, reload)
assert.Equal(t, len(optionBlock), 1)
assert.Equal(t, len(optionReload), 1)
assert.Equal(t, &testOptions, optionBlock["potato"])
assert.Equal(t, fmt.Sprintf("%p", reload), fmt.Sprintf("%p", optionReload["potato"]))
}
func TestOptionsBlocks(t *testing.T) {
defer clearOptionBlock()
AddOption("potato", &testOptions)
@@ -53,7 +67,14 @@ func TestOptionsGet(t *testing.T) {
func TestOptionsSet(t *testing.T) {
defer clearOptionBlock()
AddOption("potato", &testOptions)
var reloaded int
AddOptionReload("potato", &testOptions, func() error {
if reloaded > 0 {
return errors.New("error while reloading")
}
reloaded++
return nil
})
call := Calls.Get("options/set")
require.NotNil(t, call)
@@ -67,6 +88,12 @@ func TestOptionsSet(t *testing.T) {
require.Nil(t, out)
assert.Equal(t, 50, testOptions.Int)
assert.Equal(t, "hello", testOptions.String)
assert.Equal(t, 1, reloaded)
// error from reload
_, err = call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "error while reloading")
// unknown option block
in = Params{
@@ -85,4 +112,5 @@ func TestOptionsSet(t *testing.T) {
_, err = call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to write options")
}

View File

@@ -39,17 +39,21 @@ func rcSyncCopyMove(in rc.Params, name string) (out rc.Params, err error) {
if err != nil {
return nil, err
}
createEmptySrcDirs, err := in.GetBool("createEmptySrcDirs")
if rc.NotErrParamNotFound(err) {
return nil, err
}
switch name {
case "sync":
return nil, Sync(dstFs, srcFs)
return nil, Sync(dstFs, srcFs, createEmptySrcDirs)
case "copy":
return nil, CopyDir(dstFs, srcFs)
return nil, CopyDir(dstFs, srcFs, createEmptySrcDirs)
case "move":
deleteEmptySrcDirs, err := in.GetBool("deleteEmptySrcDirs")
if rc.NotErrParamNotFound(err) {
return nil, err
}
return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs)
return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs, createEmptySrcDirs)
}
panic("unknown rcSyncCopyMove type")
}

View File

@@ -24,6 +24,7 @@ type syncCopyMove struct {
fsrc fs.Fs
deleteMode fs.DeleteMode // how we are doing deletions
DoMove bool
copyEmptySrcDirs bool
deleteEmptySrcDirs bool
dir string
// internal state
@@ -63,12 +64,16 @@ type syncCopyMove struct {
suffix string // suffix to add to files placed in backupDir
}
func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) (*syncCopyMove, error) {
func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) {
if (deleteMode != fs.DeleteModeOff || DoMove) && operations.Overlapping(fdst, fsrc) {
return nil, fserrors.FatalError(fs.ErrorOverlapping)
}
s := &syncCopyMove{
fdst: fdst,
fsrc: fsrc,
deleteMode: deleteMode,
DoMove: DoMove,
copyEmptySrcDirs: copyEmptySrcDirs,
deleteEmptySrcDirs: deleteEmptySrcDirs,
dir: "",
srcFilesChan: make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers),
@@ -686,7 +691,9 @@ func (s *syncCopyMove) run() error {
s.stopTransfers()
s.stopDeleters()
s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs))
if s.copyEmptySrcDirs {
s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs))
}
// Delete files after
if s.deleteMode == fs.DeleteModeAfter {
@@ -849,7 +856,7 @@ func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
// If DoMove is true then files will be moved instead of copied
//
// dir is the start directory, "" for root
func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) error {
func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
if deleteMode != fs.DeleteModeOff && DoMove {
return fserrors.FatalError(errors.New("can't delete and move at the same time"))
}
@@ -859,7 +866,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
}
// only delete stuff during in this pass
do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs)
do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs)
if err != nil {
return err
}
@@ -870,7 +877,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
// Next pass does a copy only
deleteMode = fs.DeleteModeOff
}
do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs)
do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs)
if err != nil {
return err
}
@@ -878,22 +885,22 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
}
// Sync fsrc into fdst
func Sync(fdst, fsrc fs.Fs) error {
return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false)
func Sync(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false, copyEmptySrcDirs)
}
// CopyDir copies fsrc into fdst
func CopyDir(fdst, fsrc fs.Fs) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false)
func CopyDir(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs)
}
// moveDir moves fsrc into fdst
func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs)
func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs)
}
// MoveDir moves fsrc into fdst
func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
if operations.Same(fdst, fsrc) {
fs.Errorf(fdst, "Nothing to do as source and destination are the same")
return nil
@@ -920,13 +927,6 @@ func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
}
}
// The two remotes mustn't overlap if we didn't do server side move
if operations.Overlapping(fdst, fsrc) {
err := fs.ErrorCantMoveOverlapping
fs.Errorf(fdst, "%v", err)
return err
}
// Otherwise move the files one by one
return moveDir(fdst, fsrc, deleteEmptySrcDirs)
return moveDir(fdst, fsrc, deleteEmptySrcDirs, copyEmptySrcDirs)
}

View File

@@ -11,6 +11,7 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest"
@@ -39,7 +40,7 @@ func TestCopyWithDryRun(t *testing.T) {
r.Mkdir(r.Fremote)
fs.Config.DryRun = true
err := CopyDir(r.Fremote, r.Flocal)
err := CopyDir(r.Fremote, r.Flocal, false)
fs.Config.DryRun = false
require.NoError(t, err)
@@ -54,7 +55,7 @@ func TestCopy(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
r.Mkdir(r.Fremote)
err := CopyDir(r.Fremote, r.Flocal)
err := CopyDir(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -71,7 +72,7 @@ func TestCopyNoTraverse(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
err := CopyDir(r.Fremote, r.Flocal)
err := CopyDir(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -89,7 +90,7 @@ func TestSyncNoTraverse(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -107,7 +108,7 @@ func TestCopyWithDepth(t *testing.T) {
fs.Config.MaxDepth = 1
defer func() { fs.Config.MaxDepth = -1 }()
err := CopyDir(r.Fremote, r.Flocal)
err := CopyDir(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1, file2)
@@ -135,7 +136,7 @@ func TestCopyWithFilesFrom(t *testing.T) {
}
defer unpatch()
err = CopyDir(r.Fremote, r.Flocal)
err = CopyDir(r.Fremote, r.Flocal, false)
require.NoError(t, err)
unpatch()
@@ -152,7 +153,59 @@ func TestCopyEmptyDirectories(t *testing.T) {
require.NoError(t, err)
r.Mkdir(r.Fremote)
err = CopyDir(r.Fremote, r.Flocal)
err = CopyDir(r.Fremote, r.Flocal, true)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1,
},
[]string{
"sub dir",
"sub dir2",
},
fs.GetModifyWindow(r.Fremote),
)
}
// Test move empty directories
func TestMoveEmptyDirectories(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
err := operations.Mkdir(r.Flocal, "sub dir2")
require.NoError(t, err)
r.Mkdir(r.Fremote)
err = MoveDir(r.Fremote, r.Flocal, false, true)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1,
},
[]string{
"sub dir",
"sub dir2",
},
fs.GetModifyWindow(r.Fremote),
)
}
// Test sync empty directories
func TestSyncEmptyDirectories(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
err := operations.Mkdir(r.Flocal, "sub dir2")
require.NoError(t, err)
r.Mkdir(r.Fremote)
err = Sync(r.Fremote, r.Flocal, true)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
@@ -181,7 +234,7 @@ func TestServerSideCopy(t *testing.T) {
defer finaliseCopy()
t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy)
err = CopyDir(FremoteCopy, r.Fremote)
err = CopyDir(FremoteCopy, r.Fremote, false)
require.NoError(t, err)
fstest.CheckItems(t, FremoteCopy, file1)
@@ -199,7 +252,7 @@ func TestCopyAfterDelete(t *testing.T) {
err := operations.Mkdir(r.Flocal, "")
require.NoError(t, err)
err = CopyDir(r.Fremote, r.Flocal)
err = CopyDir(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal)
@@ -213,7 +266,7 @@ func TestCopyRedownload(t *testing.T) {
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
fstest.CheckItems(t, r.Fremote, file1)
err := CopyDir(r.Flocal, r.Fremote)
err := CopyDir(r.Flocal, r.Fremote, false)
require.NoError(t, err)
// Test with combined precision of local and remote as we copied it there and back
@@ -233,7 +286,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
// We should have transferred exactly one file.
@@ -245,7 +298,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
err = Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
// We should have transferred no files
@@ -267,7 +320,7 @@ func TestSyncSizeOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
// We should have transferred exactly one file.
@@ -279,7 +332,7 @@ func TestSyncSizeOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
err = Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
// We should have transferred no files
@@ -301,7 +354,7 @@ func TestSyncIgnoreSize(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
// We should have transferred exactly one file.
@@ -313,7 +366,7 @@ func TestSyncIgnoreSize(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
err = Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
// We should have transferred no files
@@ -329,7 +382,7 @@ func TestSyncIgnoreTimes(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
// We should have transferred exactly 0 files because the
@@ -340,7 +393,7 @@ func TestSyncIgnoreTimes(t *testing.T) {
defer func() { fs.Config.IgnoreTimes = false }()
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
err = Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
// We should have transferred exactly one file even though the
@@ -360,7 +413,7 @@ func TestSyncIgnoreExisting(t *testing.T) {
defer func() { fs.Config.IgnoreExisting = false }()
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file1)
@@ -368,7 +421,7 @@ func TestSyncIgnoreExisting(t *testing.T) {
// Change everything
r.WriteFile("existing", "newpotatoes", t2)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
err = Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
// Items should not change
fstest.CheckItems(t, r.Fremote, file1)
@@ -416,7 +469,7 @@ func TestSyncIgnoreErrors(t *testing.T) {
accounting.Stats.ResetCounters()
fs.CountError(nil)
assert.NoError(t, Sync(r.Fremote, r.Flocal))
assert.NoError(t, Sync(r.Fremote, r.Flocal, false))
fstest.CheckListingWithPrecision(
t,
@@ -459,7 +512,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
defer func() { fs.Config.DryRun = false }()
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -468,7 +521,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
fs.Config.DryRun = false
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
err = Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -496,7 +549,7 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -517,7 +570,7 @@ func TestSyncDoesntUpdateModtime(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -537,7 +590,7 @@ func TestSyncAfterAddingAFile(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1, file2)
fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -552,7 +605,7 @@ func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file2)
@@ -575,7 +628,7 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file2)
@@ -591,7 +644,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
fs.Config.DryRun = true
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
fs.Config.DryRun = false
require.NoError(t, err)
@@ -610,7 +663,7 @@ func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1, file3)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1, file3)
fstest.CheckItems(t, r.Fremote, file1, file3)
@@ -656,7 +709,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDir(t *testing.T) {
)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
@@ -726,7 +779,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
accounting.Stats.ResetCounters()
fs.CountError(nil)
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
assert.Equal(t, fs.ErrorNotDeleting, err)
fstest.CheckListingWithPrecision(
@@ -803,7 +856,7 @@ func TestCopyDeleteBefore(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)
accounting.Stats.ResetCounters()
err := CopyDir(r.Fremote, r.Flocal)
err := CopyDir(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -826,14 +879,14 @@ func TestSyncWithExclude(t *testing.T) {
}()
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file2, file1)
// Now sync the other way round and check enormous doesn't get
// deleted as it is excluded from the sync
accounting.Stats.ResetCounters()
err = Sync(r.Flocal, r.Fremote)
err = Sync(r.Flocal, r.Fremote, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2, file1, file3)
}
@@ -856,14 +909,14 @@ func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
}()
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file2)
// Check sync the other way round to make sure enormous gets
// deleted even though it is excluded
accounting.Stats.ResetCounters()
err = Sync(r.Flocal, r.Fremote)
err = Sync(r.Flocal, r.Fremote, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2)
}
@@ -898,7 +951,7 @@ func TestSyncWithUpdateOlder(t *testing.T) {
}()
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, oneO, twoF, threeO, fourF, fiveF)
}
@@ -922,7 +975,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
f2 := r.WriteFile("yam", "Yam Content", t2)
accounting.Stats.ResetCounters()
require.NoError(t, Sync(r.Fremote, r.Flocal))
require.NoError(t, Sync(r.Fremote, r.Flocal, false))
fstest.CheckItems(t, r.Fremote, f1, f2)
fstest.CheckItems(t, r.Flocal, f1, f2)
@@ -931,7 +984,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
f2 = r.RenameFile(f2, "yaml")
accounting.Stats.ResetCounters()
require.NoError(t, Sync(r.Fremote, r.Flocal))
require.NoError(t, Sync(r.Fremote, r.Flocal, false))
fstest.CheckItems(t, r.Fremote, f1, f2)
@@ -968,7 +1021,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
// Do server side move
accounting.Stats.ResetCounters()
err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs)
err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs, false)
require.NoError(t, err)
if withFilter {
@@ -995,7 +1048,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
// Move it back to a new empty remote, dst does not exist this time
accounting.Stats.ResetCounters()
err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs)
err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs, false)
require.NoError(t, err)
if withFilter {
@@ -1020,7 +1073,7 @@ func TestMoveWithDeleteEmptySrcDirs(t *testing.T) {
r.Mkdir(r.Fremote)
// run move with --delete-empty-src-dirs
err := MoveDir(r.Fremote, r.Flocal, true)
err := MoveDir(r.Fremote, r.Flocal, true, false)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
@@ -1040,7 +1093,7 @@ func TestMoveWithoutDeleteEmptySrcDirs(t *testing.T) {
file2 := r.WriteFile("nested/sub dir/file", "nested", t1)
r.Mkdir(r.Fremote)
err := MoveDir(r.Fremote, r.Flocal, false)
err := MoveDir(r.Fremote, r.Flocal, false, false)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
@@ -1101,16 +1154,37 @@ func TestServerSideMoveOverlap(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)
// Subdir move with no filters should return ErrorOverlapping
err = MoveDir(FremoteMove, r.Fremote, false)
assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())
err = MoveDir(FremoteMove, r.Fremote, false, false)
assert.EqualError(t, err, fs.ErrorOverlapping.Error())
// Now try with a filter which should also fail with ErrorOverlapping
filter.Active.Opt.MinSize = 40
defer func() {
filter.Active.Opt.MinSize = -1
}()
err = MoveDir(FremoteMove, r.Fremote, false)
assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())
err = MoveDir(FremoteMove, r.Fremote, false, false)
assert.EqualError(t, err, fs.ErrorOverlapping.Error())
}
// Test a sync with overlap
func TestSyncOverlap(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
subRemoteName := r.FremoteName + "/rclone-sync-test"
FremoteSync, err := fs.NewFs(subRemoteName)
require.NoError(t, err)
checkErr := func(err error) {
require.Error(t, err)
assert.True(t, fserrors.IsFatalError(err))
assert.Equal(t, fs.ErrorOverlapping.Error(), err.Error())
}
checkErr(Sync(FremoteSync, r.Fremote, false))
checkErr(Sync(r.Fremote, FremoteSync, false))
checkErr(Sync(r.Fremote, r.Fremote, false))
checkErr(Sync(FremoteSync, FremoteSync, false))
}
// Test with BackupDir set
@@ -1145,7 +1219,7 @@ func testSyncBackupDir(t *testing.T, suffix string) {
require.NoError(t, err)
accounting.Stats.ResetCounters()
err = Sync(fdst, r.Flocal)
err = Sync(fdst, r.Flocal, false)
require.NoError(t, err)
// one should be moved to the backup dir and the new one installed
@@ -1166,7 +1240,7 @@ func testSyncBackupDir(t *testing.T, suffix string) {
// This should delete three and overwrite one again, checking
// the files got overwritten correctly in backup-dir
accounting.Stats.ResetCounters()
err = Sync(fdst, r.Flocal)
err = Sync(fdst, r.Flocal, false)
require.NoError(t, err)
// one should be moved to the backup dir and the new one installed
@@ -1203,7 +1277,7 @@ func TestSyncUTFNorm(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
// We should have transferred exactly one file, but kept the
@@ -1229,7 +1303,7 @@ func TestSyncImmutable(t *testing.T) {
// Should succeed
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file1)
@@ -1241,7 +1315,7 @@ func TestSyncImmutable(t *testing.T) {
// Should fail with ErrorImmutableModified and not modify local or remote files
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
err = Sync(r.Fremote, r.Flocal, false)
assert.EqualError(t, err, fs.ErrorImmutableModified.Error())
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file1)
@@ -1277,6 +1351,6 @@ func TestAbort(t *testing.T) {
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
err := Sync(r.Fremote, r.Flocal, false)
assert.Equal(t, accounting.ErrorMaxTransferLimitReached, err)
}


@@ -138,3 +138,7 @@ backends:
remote: "TestUnion:"
subdir: false
fastlist: false
- backend: "koofr"
remote: "TestKoofr:"
subdir: false
fastlist: false

go.mod

@@ -23,6 +23,8 @@ require (
github.com/jlaffaye/ftp v0.0.0-20190126081051-8019e6774408
github.com/jtolds/gls v4.2.1+incompatible // indirect
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a // indirect
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff
github.com/kr/fs v0.1.0 // indirect
github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
@@ -42,7 +44,6 @@ require (
github.com/spf13/pflag v1.0.3
github.com/stretchr/testify v1.3.0
github.com/t3rm1n4l/go-mega v0.0.0-20190205172012-55a226cf41da
github.com/thinkhy/go-adb v0.0.0-20190123053734-b4b48de70418
github.com/xanzy/ssh-agent v0.2.0
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible
go.etcd.io/bbolt v1.3.2 // indirect
@@ -50,12 +51,10 @@ require (
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
golang.org/x/sys v0.0.0-20190213121743-983097b1a8a3
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c
google.golang.org/api v0.1.0
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/yaml.v2 v2.2.2
)
replace github.com/thinkhy/go-adb v0.0.0-20190123053734-b4b48de70418 => ../../../github.com/thinkhy/go-adb

go.sum

@@ -23,9 +23,6 @@ github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4 h1:mK1/QgFPU4osbhjJ26B1w7
github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5 h1:lmyFvZXNGOmsKCYXNwzDLWafnxeewxsFwdsvTvSC1sg=
github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5/go.mod h1:DGqLjaZ3ziKKNRt+U5Q9PLWJ52Q/4rxfaaH/b3QYKaE=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
@@ -35,7 +32,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/billziss-gh/cgofuse v1.1.0 h1:tATn9ZDvuPcOVlvR4tJitGHgAqy1y18+4mKmRfdfjec=
github.com/billziss-gh/cgofuse v1.1.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/cheggaaa/pb v2.0.6+incompatible/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
@@ -79,10 +75,6 @@ github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4=
github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
@@ -95,6 +87,10 @@ github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVY
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a h1:W+gnfphB7WpRj0rbTF40e3edULfri4fou2kUFw6AF3A=
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a/go.mod h1:3xszwh+rNrYk1r9SStc4iJ326gne1OaBcrdB1ACsbzI=
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff h1:GlfzG8bgyoJYz+5sMvGpYnHrg4veNVNnDGuE9hTEMHk=
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff/go.mod h1:MRAz4Gsxd+OzrZ0owwrUHc0zLESL+1Y5syqK/sJxK2A=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -103,8 +99,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -179,7 +173,6 @@ github.com/spf13/cobra v0.0.4-0.20190109003409-7547e83b2d85/go.mod h1:1l0Ry5zgKv
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
@@ -189,8 +182,6 @@ github.com/t3rm1n4l/go-mega v0.0.0-20190205172012-55a226cf41da/go.mod h1:XWL4vDy
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
github.com/yosemite-open/go-adb v0.0.0-20181206003817-d40962019194 h1:hQ7oP/X/5JR3gGKEEEZU3uihkDePXlFoTwr0XDu5CKg=
github.com/yosemite-open/go-adb v0.0.0-20181206003817-d40962019194/go.mod h1:OoY1zUwKq/hv/6hBuQxzSRNu1XZ289eXaDNgoHa+3lU=
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible h1:/Z0q3/eSMoPYAuRmhjWtuGSmVVciFC6hfm3yfCKuvz0=
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible/go.mod h1:w6wqLDQ5bBTzxGJ55581UrSwLrsTAsdo9N6yX/8d9RY=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
@@ -230,10 +221,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952 h1:FDfvYgoVsA7TTZSbgiqjAbfPbK47CNHdWl3h/PJtii0=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190213121743-983097b1a8a3 h1:+KlxhGbYkFs8lMfwKn+2ojry1ID5eBSMXprS2u/wqCE=
golang.org/x/sys v0.0.0-20190213121743-983097b1a8a3/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
@@ -261,18 +248,12 @@ google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
gopkg.in/VividCortex/ewma.v1 v1.1.1/go.mod h1:TekXuFipeiHWiAlO1+wSS23vTcyFau5u3rxXUSXj710=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v2 v2.0.6/go.mod h1:0CiZ1p8pvtxBlQpLXkHuUTpdJ1shm3OqCF1QugkjHL4=
gopkg.in/fatih/color.v1 v1.7.0/go.mod h1:P7yosIhqIl/sX8J8UypY5M+dDpD2KmyfP5IRs5v/fo0=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/mattn/go-colorable.v0 v0.1.0/go.mod h1:BVJlBXzARQxdi3nZo6f6bnl5yR20/tOL6p+V0KejgSY=
gopkg.in/mattn/go-isatty.v0 v0.0.4/go.mod h1:wt691ab7g0X4ilKZNmMII3egK0bTxl37fEn/Fwbd8gc=
gopkg.in/mattn/go-runewidth.v0 v0.0.4/go.mod h1:BmXejnxvhwdaATwiJbB1vZ2dtXkQKZGu9yLFCZb4msQ=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=


@@ -13,23 +13,32 @@ import (
)
var (
fns []func()
fns = make(map[FnHandle]bool)
fnsMutex sync.Mutex
exitChan chan os.Signal
exitOnce sync.Once
registerOnce sync.Once
)
// Register a function to be called on exit
func Register(fn func()) {
fns = append(fns, fn)
// FnHandle is the type of the handle returned by function `Register`
// that can be used to unregister an at-exit function
type FnHandle *func()
// Register a function to be called on exit.
// Returns a handle which can be used to unregister the function with `Unregister`.
func Register(fn func()) FnHandle {
fnsMutex.Lock()
fns[&fn] = true
fnsMutex.Unlock()
// Run AtExit handlers on SIGINT or SIGTERM so everything gets
// tidied up properly
registerOnce.Do(func() {
exitChan = make(chan os.Signal, 1)
signal.Notify(exitChan, os.Interrupt) // syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT
go func() {
sig, closed := <-exitChan
if closed || sig == nil {
sig := <-exitChan
if sig == nil {
return
}
fs.Infof(nil, "Signal received: %s", sig)
@@ -38,6 +47,15 @@ func Register(fn func()) {
os.Exit(0)
}()
})
return &fn
}
// Unregister a function using the handle returned by `Register`
func Unregister(handle FnHandle) {
fnsMutex.Lock()
defer fnsMutex.Unlock()
delete(fns, handle)
}
// IgnoreSignals disables the signal handler and prevents Run from being executed automatically
@@ -53,8 +71,10 @@ func IgnoreSignals() {
// Run all the at exit functions if they haven't been run already
func Run() {
exitOnce.Do(func() {
for _, fn := range fns {
fn()
fnsMutex.Lock()
defer fnsMutex.Unlock()
for fnHandle := range fns {
(*fnHandle)()
}
})
}
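A short usage sketch of the handle-based API; the cleanup body is illustrative:

package main

import (
	"fmt"

	"github.com/ncw/rclone/lib/atexit"
)

func main() {
	// Register now returns a handle so the function can be removed again.
	handle := atexit.Register(func() {
		fmt.Println("cleaning up")
	})
	// Once the guarded resource is gone the handler can be dropped.
	atexit.Unregister(handle)
	atexit.Run() // prints nothing: the handler was unregistered
}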

lib/errors/errors.go (new file)

@@ -0,0 +1,76 @@
package errors
import (
"errors"
"fmt"
"reflect"
)
// New returns an error that formats as the given text.
func New(text string) error {
return errors.New(text)
}
// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
func Errorf(format string, a ...interface{}) error {
return fmt.Errorf(format, a...)
}
// WalkFunc is the signature of the Walk callback function. The function gets the
// current error in the chain and should return true if the chain processing
// should be aborted.
type WalkFunc func(error) bool
// Walk invokes the given function for each error in the chain. If the
// provided function returns true or no further cause can be found, the process
// is stopped and no further calls will be made.
//
// The next error in the chain is determined by the following rules:
// - If the current error has a `Cause() error` method (github.com/pkg/errors),
// the return value of this method is used.
// - If the current error has an `Unwrap() error` method (golang.org/x/xerrors),
// the return value of this method is used.
// - Common errors in the Go standard library that contain an Err field will use this value.
func Walk(err error, f WalkFunc) {
for prev := err; err != nil; prev = err {
if f(err) {
return
}
switch e := err.(type) {
case causer:
err = e.Cause()
case wrapper:
err = e.Unwrap()
default:
// Unpack any struct or *struct with a field of name Err which satisfies
// the error interface. This includes *url.Error, *net.OpError,
// *os.SyscallError and many others in the stdlib.
errType := reflect.TypeOf(err)
errValue := reflect.ValueOf(err)
if errValue.IsValid() && errType.Kind() == reflect.Ptr {
errType = errType.Elem()
errValue = errValue.Elem()
}
if errValue.IsValid() && errType.Kind() == reflect.Struct {
if errField := errValue.FieldByName("Err"); errField.IsValid() {
errFieldValue := errField.Interface()
if newErr, ok := errFieldValue.(error); ok {
err = newErr
}
}
}
}
if err == prev {
break
}
}
}
type causer interface {
Cause() error
}
type wrapper interface {
Unwrap() error
}
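To illustrate the unwrapping rules above, a minimal sketch that walks to the root cause of a wrapped error; *url.Error is unwrapped via its Err field through the reflection branch:

package main

import (
	"fmt"
	"net/url"

	"github.com/ncw/rclone/lib/errors"
)

// rootCause returns the last error in the chain by never asking
// Walk to stop early.
func rootCause(err error) (root error) {
	errors.Walk(err, func(e error) bool {
		root = e
		return false // keep walking to the end of the chain
	})
	return root
}

func main() {
	wrapped := &url.Error{Op: "Get", URL: "http://example.com", Err: errors.New("boom")}
	fmt.Println(rootCause(wrapped)) // boom
}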

lib/errors/errors_test.go (new file)

@@ -0,0 +1,90 @@
package errors_test
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/ncw/rclone/lib/errors"
)
func TestWalk(t *testing.T) {
origin := errors.New("origin")
for _, test := range []struct {
err error
calls int
last error
}{
{causerError{nil}, 1, causerError{nil}},
{wrapperError{nil}, 1, wrapperError{nil}},
{reflectError{nil}, 1, reflectError{nil}},
{causerError{origin}, 2, origin},
{wrapperError{origin}, 2, origin},
{reflectError{origin}, 2, origin},
{causerError{reflectError{origin}}, 3, origin},
{wrapperError{causerError{origin}}, 3, origin},
{reflectError{wrapperError{origin}}, 3, origin},
{causerError{reflectError{causerError{origin}}}, 4, origin},
{wrapperError{causerError{wrapperError{origin}}}, 4, origin},
{reflectError{wrapperError{reflectError{origin}}}, 4, origin},
{stopError{nil}, 1, stopError{nil}},
{stopError{causerError{nil}}, 1, stopError{causerError{nil}}},
{stopError{wrapperError{nil}}, 1, stopError{wrapperError{nil}}},
{stopError{reflectError{nil}}, 1, stopError{reflectError{nil}}},
{causerError{stopError{origin}}, 2, stopError{origin}},
{wrapperError{stopError{origin}}, 2, stopError{origin}},
{reflectError{stopError{origin}}, 2, stopError{origin}},
{causerError{reflectError{stopError{nil}}}, 3, stopError{nil}},
{wrapperError{causerError{stopError{nil}}}, 3, stopError{nil}},
{reflectError{wrapperError{stopError{nil}}}, 3, stopError{nil}},
} {
var last error
calls := 0
errors.Walk(test.err, func(err error) bool {
calls++
last = err
_, stop := err.(stopError)
return stop
})
assert.Equal(t, test.calls, calls)
assert.Equal(t, test.last, last)
}
}
type causerError struct {
err error
}
type wrapperError struct {
err error
}
type reflectError struct {
Err error
}
type stopError struct {
err error
}
func (e causerError) Error() string {
return fmt.Sprintf("causerError(%s)", e.err)
}
func (e causerError) Cause() error {
return e.err
}
func (e wrapperError) Unwrap() error {
return e.err
}
func (e wrapperError) Error() string {
return fmt.Sprintf("wrapperError(%s)", e.err)
}
func (e reflectError) Error() string {
return fmt.Sprintf("reflectError(%s)", e.Err)
}
func (e stopError) Error() string {
return fmt.Sprintf("stopError(%s)", e.err)
}
func (e stopError) Cause() error {
return e.err
}


@@ -2,74 +2,69 @@
package pacer
import (
"context"
"math/rand"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"golang.org/x/time/rate"
"github.com/ncw/rclone/lib/errors"
)
// Pacer state
type Pacer struct {
mu sync.Mutex // Protecting read/writes
minSleep time.Duration // minimum sleep time
maxSleep time.Duration // maximum sleep time
burst int // number of calls to send without rate limiting
limiter *rate.Limiter // rate limiter for the minsleep
decayConstant uint // decay constant
attackConstant uint // attack constant
pacer chan struct{} // To pace the operations
sleepTime time.Duration // Time to sleep for each transaction
retries int // Max number of retries
maxConnections int // Maximum number of concurrent connections
connTokens chan struct{} // Connection tokens
calculatePace func(bool) // switchable pacing algorithm - call with mu held
consecutiveRetries int // number of consecutive retries
// State represents the public Pacer state that will be passed to the
// configured Calculator
type State struct {
SleepTime time.Duration // current time to sleep before adding the pacer token back
ConsecutiveRetries int // number of consecutive retries, will be 0 when the last invoker call returned false
LastError error // the error returned by the last invoker call or nil
}
// Type is for selecting different pacing algorithms
type Type int
// Calculator is a generic calculation function for a Pacer.
type Calculator interface {
// Calculate takes the current Pacer state and returns the sleep time to wait
// before the next Pacer call is made.
Calculate(state State) time.Duration
}
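For illustration, a minimal user-supplied Calculator satisfying this interface: no delay after a success, a fixed delay after a retry (the package name and the fixed delay are assumptions for the sketch):

package mybackend // hypothetical

import (
	"time"

	"github.com/ncw/rclone/lib/pacer"
)

// fixedCalculator sleeps a constant time after a retry and not at
// all after a success.
type fixedCalculator struct {
	onRetry time.Duration
}

func (c fixedCalculator) Calculate(state pacer.State) time.Duration {
	if state.ConsecutiveRetries > 0 {
		return c.onRetry
	}
	return 0
}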
const (
// DefaultPacer is a truncated exponential attack and decay.
//
// On retries the sleep time is doubled, on non errors then
// sleeptime decays according to the decay constant as set
// with SetDecayConstant.
//
// The sleep never goes below that set with SetMinSleep or
// above that set with SetMaxSleep.
DefaultPacer = Type(iota)
// Pacer is the primary type of the pacer package. It allows calls to be
// retried with a configurable delay between them.
type Pacer struct {
pacerOptions
mu sync.Mutex // Protecting read/writes
pacer chan struct{} // To pace the operations
connTokens chan struct{} // Connection tokens
state State
}
type pacerOptions struct {
maxConnections int // Maximum number of concurrent connections
retries int // Max number of retries
calculator Calculator // switchable pacing algorithm - call with mu held
invoker InvokerFunc // wrapper function used to invoke the target function
}
// AmazonCloudDrivePacer is a specialised pacer for Amazon Drive
//
// It implements a truncated exponential backoff strategy with
// randomization. Normally operations are paced at the
// interval set with SetMinSleep. On errors the sleep timer
// is set to 0..2**retries seconds.
//
// See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
AmazonCloudDrivePacer
// InvokerFunc is the signature of the wrapper function used to invoke the
// target function in Pacer.
type InvokerFunc func(try, tries int, f Paced) (bool, error)
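As a sketch, a custom InvokerFunc can wrap each attempt, for example to log failing tries before the Pacer decides whether to retry; the package name and log message are illustrative:

package mybackend // hypothetical

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/lib/pacer"
)

// loggingInvoker runs the wrapped Paced function and logs any
// attempt that asks to be retried.
func loggingInvoker(try, tries int, f pacer.Paced) (bool, error) {
	retry, err := f()
	if retry {
		fs.Debugf("pacer", "attempt %d/%d failed: %v", try, tries, err)
	}
	return retry, err
}

// Installed with the new functional option:
func newLoggingPacer() *pacer.Pacer {
	return pacer.New(pacer.InvokerOption(loggingInvoker))
}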
// GoogleDrivePacer is a specialised pacer for Google Drive
//
// It implements a truncated exponential backoff strategy with
// randomization. Normally operations are paced at the
// interval set with SetMinSleep. On errors the sleep timer
// is set to (2 ^ n) + random_number_milliseconds seconds
//
// See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
GoogleDrivePacer
// Option can be used in New to configure the Pacer.
type Option func(*pacerOptions)
// S3Pacer is a specialised pacer for S3
//
// It is basically the defaultPacer, but allows the sleep time to go to 0
// when things are going well.
S3Pacer
)
// CalculatorOption sets a Calculator for the new Pacer.
func CalculatorOption(c Calculator) Option {
return func(p *pacerOptions) { p.calculator = c }
}
// RetriesOption sets the retries number for the new Pacer.
func RetriesOption(retries int) Option {
return func(p *pacerOptions) { p.retries = retries }
}
// MaxConnectionsOption sets the maximum connections number for the new Pacer.
func MaxConnectionsOption(maxConnections int) Option {
return func(p *pacerOptions) { p.maxConnections = maxConnections }
}
// InvokerOption sets an InvokerFunc for the new Pacer.
func InvokerOption(invoker InvokerFunc) Option {
return func(p *pacerOptions) { p.invoker = invoker }
}
// Paced is a function which is called by the Call and CallNoRetry
// methods. It should return a boolean, true if it would like to be
@@ -77,19 +72,27 @@ const (
// wrapped in a RetryError.
type Paced func() (bool, error)
// New returns a Pacer with sensible defaults
func New() *Pacer {
p := &Pacer{
maxSleep: 2 * time.Second,
decayConstant: 2,
attackConstant: 1,
retries: fs.Config.LowLevelRetries,
pacer: make(chan struct{}, 1),
// New returns a Pacer with sensible defaults.
func New(options ...Option) *Pacer {
opts := pacerOptions{
maxConnections: 10,
retries: 3,
}
p.sleepTime = p.minSleep
p.SetPacer(DefaultPacer)
p.SetMaxConnections(fs.Config.Checkers + fs.Config.Transfers)
p.SetMinSleep(10 * time.Millisecond)
for _, o := range options {
o(&opts)
}
p := &Pacer{
pacerOptions: opts,
pacer: make(chan struct{}, 1),
}
if p.calculator == nil {
p.SetCalculator(nil)
}
p.state.SleepTime = p.calculator.Calculate(p.state)
if p.invoker == nil {
p.invoker = invoke
}
p.SetMaxConnections(p.maxConnections)
// Put the first pacing token in
p.pacer <- struct{}{}
@@ -97,54 +100,11 @@ func New() *Pacer {
return p
}
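A sketch of constructing a Pacer with the new functional options, replacing the old chained setters; the concrete values are illustrative:

package mybackend // hypothetical

import (
	"time"

	"github.com/ncw/rclone/lib/pacer"
)

func newDefaultPacer() *pacer.Pacer {
	return pacer.New(
		pacer.RetriesOption(10),
		pacer.MaxConnectionsOption(0), // 0 means no connection limit
		pacer.CalculatorOption(pacer.NewDefault(
			pacer.MinSleep(10*time.Millisecond),
			pacer.MaxSleep(2*time.Second),
		)),
	)
}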
// SetSleep sets the current sleep time
func (p *Pacer) SetSleep(t time.Duration) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.sleepTime = t
return p
}
// GetSleep gets the current sleep time
func (p *Pacer) GetSleep() time.Duration {
p.mu.Lock()
defer p.mu.Unlock()
return p.sleepTime
}
// SetMinSleep sets the minimum sleep time for the pacer
func (p *Pacer) SetMinSleep(t time.Duration) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.minSleep = t
p.sleepTime = p.minSleep
p.limiter = rate.NewLimiter(rate.Every(p.minSleep), p.burst)
return p
}
// SetBurst sets the burst with no limiting of the pacer
func (p *Pacer) SetBurst(n int) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.burst = n
p.limiter = rate.NewLimiter(rate.Every(p.minSleep), p.burst)
return p
}
// SetMaxSleep sets the maximum sleep time for the pacer
func (p *Pacer) SetMaxSleep(t time.Duration) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.maxSleep = t
p.sleepTime = p.minSleep
return p
}
// SetMaxConnections sets the maximum number of concurrent connections.
// Setting the value to 0 will allow an unlimited number of connections.
// Should not be changed once you have started calling the pacer.
// By default this will be set to fs.Config.Checkers.
func (p *Pacer) SetMaxConnections(n int) *Pacer {
func (p *Pacer) SetMaxConnections(n int) {
p.mu.Lock()
defer p.mu.Unlock()
p.maxConnections = n
@@ -156,61 +116,34 @@ func (p *Pacer) SetMaxConnections(n int) *Pacer {
p.connTokens <- struct{}{}
}
}
return p
}
// SetDecayConstant sets the decay constant for the pacer
//
// This is the speed the time falls back to the minimum after errors
// have occurred.
//
// bigger for slower decay, exponential. 1 is halve, 0 is go straight to minimum
func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.decayConstant = decay
return p
}
// SetAttackConstant sets the attack constant for the pacer
//
// This is the speed the time grows from the minimum after errors have
// occurred.
//
// bigger for slower attack, 1 is double, 0 is go straight to maximum
func (p *Pacer) SetAttackConstant(attack uint) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.attackConstant = attack
return p
}
// SetRetries sets the max number of tries for Call
func (p *Pacer) SetRetries(retries int) *Pacer {
// SetRetries sets the max number of retries for Call
func (p *Pacer) SetRetries(retries int) {
p.mu.Lock()
defer p.mu.Unlock()
p.retries = retries
return p
}
// SetPacer sets the pacing algorithm
// SetCalculator sets the pacing algorithm. Don't modify the Calculator object
// afterwards; use the ModifyCalculator method when needed.
//
// It will choose the default algorithm if an incorrect value is
// passed in.
func (p *Pacer) SetPacer(t Type) *Pacer {
// It will choose the default algorithm if nil is passed in.
func (p *Pacer) SetCalculator(c Calculator) {
p.mu.Lock()
defer p.mu.Unlock()
switch t {
case AmazonCloudDrivePacer:
p.calculatePace = p.acdPacer
case GoogleDrivePacer:
p.calculatePace = p.drivePacer
case S3Pacer:
p.calculatePace = p.s3Pacer
default:
p.calculatePace = p.defaultPacer
if c == nil {
c = NewDefault()
}
return p
p.calculator = c
}
// ModifyCalculator calls the given function with the currently configured
// Calculator and the Pacer lock held.
func (p *Pacer) ModifyCalculator(f func(Calculator)) {
p.mu.Lock()
f(p.calculator)
p.mu.Unlock()
}
// Start a call to the API
@@ -230,170 +163,29 @@ func (p *Pacer) beginCall() {
p.mu.Lock()
// Restart the timer
go func(sleepTime, minSleep time.Duration) {
// fs.Debugf(f, "New sleep for %v at %v", t, time.Now())
// Sleep the minimum time with the rate limiter
if minSleep > 0 && sleepTime >= minSleep {
_ = p.limiter.Wait(context.Background())
sleepTime -= minSleep
}
// Then sleep the remaining time
if sleepTime > 0 {
time.Sleep(sleepTime)
}
go func(t time.Duration) {
time.Sleep(t)
p.pacer <- struct{}{}
}(p.sleepTime, p.minSleep)
}(p.state.SleepTime)
p.mu.Unlock()
}
// defaultPacer implements an exponential up and down pacing algorithm
//
// See the description for DefaultPacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) defaultPacer(retry bool) {
oldSleepTime := p.sleepTime
if retry {
if p.attackConstant == 0 {
p.sleepTime = p.maxSleep
} else {
p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
}
if p.sleepTime > p.maxSleep {
p.sleepTime = p.maxSleep
}
if p.sleepTime != oldSleepTime {
fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
}
} else {
p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
if p.sleepTime < p.minSleep {
p.sleepTime = p.minSleep
}
if p.sleepTime != oldSleepTime {
fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
}
}
}
// acdPacer implements a truncated exponential backoff
// strategy with randomization for Amazon Drive
//
// See the description for AmazonCloudDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) acdPacer(retry bool) {
consecutiveRetries := p.consecutiveRetries
if consecutiveRetries == 0 {
if p.sleepTime != p.minSleep {
p.sleepTime = p.minSleep
fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
}
} else {
if consecutiveRetries > 9 {
consecutiveRetries = 9
}
// consecutiveRetries starts at 1 so
// maxSleep is 2**(consecutiveRetries-1) seconds
maxSleep := time.Second << uint(consecutiveRetries-1)
// actual sleep is random from 0..maxSleep
p.sleepTime = time.Duration(rand.Int63n(int64(maxSleep)))
if p.sleepTime < p.minSleep {
p.sleepTime = p.minSleep
}
fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
}
}
// drivePacer implements a truncated exponential backoff strategy with
// randomization for Google Drive
//
// See the description for GoogleDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) drivePacer(retry bool) {
consecutiveRetries := p.consecutiveRetries
if consecutiveRetries == 0 {
if p.sleepTime != p.minSleep {
p.sleepTime = p.minSleep
fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
}
} else {
if consecutiveRetries > 5 {
consecutiveRetries = 5
}
// consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
// maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
p.sleepTime = time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Second)))
fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
}
}
// s3Pacer implements a pacer compatible with our expectations of S3, where it
// tries not to delay at all between successful calls, but backs off in the
// default fashion in response to any errors.
// The assumption is that errors should be exceedingly rare (S3 seems to have
// largely solved the sort of scalability questions rclone is likely to run
// into), and in the happy case it can handle calls with no delays between them.
//
// Basically defaultPacer, but with some handling of sleepTime going to/from 0ms
// Ignores minSleep entirely
//
// Call with p.mu held
func (p *Pacer) s3Pacer(retry bool) {
oldSleepTime := p.sleepTime
if retry {
if p.attackConstant == 0 {
p.sleepTime = p.maxSleep
} else {
if p.sleepTime == 0 {
p.sleepTime = p.minSleep
} else {
p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
}
}
if p.sleepTime > p.maxSleep {
p.sleepTime = p.maxSleep
}
if p.sleepTime != oldSleepTime {
fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
}
} else {
p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
if p.sleepTime < p.minSleep {
p.sleepTime = 0
}
if p.sleepTime != oldSleepTime {
fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
}
}
}
// endCall implements the pacing algorithm
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
func (p *Pacer) endCall(retry bool) {
func (p *Pacer) endCall(retry bool, err error) {
if p.maxConnections > 0 {
p.connTokens <- struct{}{}
}
p.mu.Lock()
if retry {
p.consecutiveRetries++
p.state.ConsecutiveRetries++
} else {
p.consecutiveRetries = 0
p.state.ConsecutiveRetries = 0
}
p.calculatePace(retry)
p.state.LastError = err
p.state.SleepTime = p.calculator.Calculate(p.state)
p.mu.Unlock()
}
@@ -402,15 +194,11 @@ func (p *Pacer) call(fn Paced, retries int) (err error) {
var retry bool
for i := 1; i <= retries; i++ {
p.beginCall()
retry, err = fn()
p.endCall(retry)
retry, err = p.invoker(i, retries, fn)
p.endCall(retry, err)
if !retry {
break
}
fs.Debugf("pacer", "low level retry %d/%d (error %v)", i, retries, err)
}
if retry {
err = fserrors.RetryError(err)
}
return err
}
@@ -436,3 +224,41 @@ func (p *Pacer) Call(fn Paced) (err error) {
func (p *Pacer) CallNoRetry(fn Paced) error {
return p.call(fn, 1)
}
func invoke(try, tries int, f Paced) (bool, error) {
return f()
}
type retryAfterError struct {
error
retryAfter time.Duration
}
func (r *retryAfterError) Error() string {
return r.error.Error()
}
func (r *retryAfterError) Cause() error {
return r.error
}
// RetryAfterError returns a wrapped error that can be used by Calculator implementations
func RetryAfterError(err error, retryAfter time.Duration) error {
return &retryAfterError{
error: err,
retryAfter: retryAfter,
}
}
// IsRetryAfter returns true if the error or any of its causes is an error
// returned by RetryAfterError. It also returns the associated Duration if possible.
func IsRetryAfter(err error) (retryAfter time.Duration, isRetryAfter bool) {
errors.Walk(err, func(err error) bool {
if r, ok := err.(*retryAfterError); ok {
retryAfter, isRetryAfter = r.retryAfter, true
return true
}
return false
})
return
}
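A sketch of the intended round trip, assuming a fixed server-supplied delay for illustration:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/ncw/rclone/lib/pacer"
)

func main() {
	base := errors.New("HTTP 429: too many requests")
	// A backend would wrap the error with the delay the server asked for.
	err := pacer.RetryAfterError(base, 5*time.Second)
	// A Calculator can then inspect state.LastError with IsRetryAfter.
	if delay, ok := pacer.IsRetryAfter(err); ok {
		fmt.Printf("server asked us to wait %v\n", delay)
	}
}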


@@ -1,181 +1,85 @@
package pacer
import (
"fmt"
"sync"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
func TestNew(t *testing.T) {
const expectedRetries = 7
fs.Config.LowLevelRetries = expectedRetries
p := New()
if p.minSleep != 10*time.Millisecond {
t.Errorf("minSleep")
}
if p.maxSleep != 2*time.Second {
t.Errorf("maxSleep")
}
if p.sleepTime != p.minSleep {
t.Errorf("sleepTime")
}
if p.retries != expectedRetries {
t.Errorf("retries want %v got %v", expectedRetries, p.retries)
}
if p.decayConstant != 2 {
t.Errorf("decayConstant")
}
if p.attackConstant != 1 {
t.Errorf("attackConstant")
}
if cap(p.pacer) != 1 {
t.Errorf("pacer 1")
}
if len(p.pacer) != 1 {
t.Errorf("pacer 2")
}
if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.defaultPacer) {
t.Errorf("calculatePace")
}
if p.maxConnections != fs.Config.Checkers+fs.Config.Transfers {
t.Errorf("maxConnections")
}
if cap(p.connTokens) != fs.Config.Checkers+fs.Config.Transfers {
t.Errorf("connTokens")
}
if p.consecutiveRetries != 0 {
t.Errorf("consecutiveRetries")
}
}
func TestSetSleep(t *testing.T) {
p := New().SetSleep(2 * time.Millisecond)
if p.sleepTime != 2*time.Millisecond {
t.Errorf("didn't set")
}
}
func TestGetSleep(t *testing.T) {
p := New().SetSleep(2 * time.Millisecond)
if p.GetSleep() != 2*time.Millisecond {
t.Errorf("didn't get")
}
}
func TestSetMinSleep(t *testing.T) {
p := New().SetMinSleep(1 * time.Millisecond)
if p.minSleep != 1*time.Millisecond {
t.Errorf("didn't set")
}
}
func TestSetMaxSleep(t *testing.T) {
p := New().SetMaxSleep(100 * time.Second)
if p.maxSleep != 100*time.Second {
t.Errorf("didn't set")
const expectedConnections = 9
p := New(RetriesOption(expectedRetries), MaxConnectionsOption(expectedConnections))
if d, ok := p.calculator.(*Default); ok {
assert.Equal(t, 10*time.Millisecond, d.minSleep)
assert.Equal(t, 2*time.Second, d.maxSleep)
assert.Equal(t, d.minSleep, p.state.SleepTime)
assert.Equal(t, uint(2), d.decayConstant)
assert.Equal(t, uint(1), d.attackConstant)
} else {
t.Errorf("calculator")
}
assert.Equal(t, expectedRetries, p.retries)
assert.Equal(t, 1, cap(p.pacer))
assert.Equal(t, 1, len(p.pacer))
assert.Equal(t, expectedConnections, p.maxConnections)
assert.Equal(t, expectedConnections, cap(p.connTokens))
assert.Equal(t, 0, p.state.ConsecutiveRetries)
}
func TestMaxConnections(t *testing.T) {
p := New().SetMaxConnections(20)
if p.maxConnections != 20 {
t.Errorf("maxConnections")
}
if cap(p.connTokens) != 20 {
t.Errorf("connTokens")
}
p := New()
p.SetMaxConnections(20)
assert.Equal(t, 20, p.maxConnections)
assert.Equal(t, 20, cap(p.connTokens))
p.SetMaxConnections(0)
if p.maxConnections != 0 {
t.Errorf("maxConnections is not 0")
}
if p.connTokens != nil {
t.Errorf("connTokens is not nil")
}
}
func TestSetDecayConstant(t *testing.T) {
p := New().SetDecayConstant(17)
if p.decayConstant != 17 {
t.Errorf("didn't set")
}
assert.Equal(t, 0, p.maxConnections)
assert.Nil(t, p.connTokens)
}
func TestDecay(t *testing.T) {
p := New().SetMinSleep(time.Microsecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second)
c := NewDefault(MinSleep(1*time.Microsecond), MaxSleep(1*time.Second))
for _, test := range []struct {
in time.Duration
in State
attackConstant uint
want time.Duration
}{
{8 * time.Millisecond, 1, 4 * time.Millisecond},
{1 * time.Millisecond, 0, time.Microsecond},
{1 * time.Millisecond, 2, (3 * time.Millisecond) / 4},
{1 * time.Millisecond, 3, (7 * time.Millisecond) / 8},
{State{SleepTime: 8 * time.Millisecond}, 1, 4 * time.Millisecond},
{State{SleepTime: 1 * time.Millisecond}, 0, 1 * time.Microsecond},
{State{SleepTime: 1 * time.Millisecond}, 2, (3 * time.Millisecond) / 4},
{State{SleepTime: 1 * time.Millisecond}, 3, (7 * time.Millisecond) / 8},
} {
p.sleepTime = test.in
p.SetDecayConstant(test.attackConstant)
p.defaultPacer(false)
got := p.sleepTime
if got != test.want {
t.Errorf("bad sleep want %v got %v", test.want, got)
}
}
}
func TestSetAttackConstant(t *testing.T) {
p := New().SetAttackConstant(19)
if p.attackConstant != 19 {
t.Errorf("didn't set")
c.decayConstant = test.attackConstant
got := c.Calculate(test.in)
assert.Equal(t, test.want, got, "test: %+v", test)
}
}
func TestAttack(t *testing.T) {
p := New().SetMinSleep(time.Microsecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second)
c := NewDefault(MinSleep(1*time.Microsecond), MaxSleep(1*time.Second))
for _, test := range []struct {
in time.Duration
in State
attackConstant uint
want time.Duration
}{
{1 * time.Millisecond, 1, 2 * time.Millisecond},
{1 * time.Millisecond, 0, time.Second},
{1 * time.Millisecond, 2, (4 * time.Millisecond) / 3},
{1 * time.Millisecond, 3, (8 * time.Millisecond) / 7},
{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 1, 2 * time.Millisecond},
{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 0, 1 * time.Second},
{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 2, (4 * time.Millisecond) / 3},
{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 3, (8 * time.Millisecond) / 7},
} {
p.sleepTime = test.in
p.SetAttackConstant(test.attackConstant)
p.defaultPacer(true)
got := p.sleepTime
if got != test.want {
t.Errorf("bad sleep want %v got %v", test.want, got)
}
c.attackConstant = test.attackConstant
got := c.Calculate(test.in)
assert.Equal(t, test.want, got, "test: %+v", test)
}
}
func TestSetRetries(t *testing.T) {
p := New().SetRetries(18)
if p.retries != 18 {
t.Errorf("didn't set")
}
}
func TestSetPacer(t *testing.T) {
p := New().SetPacer(AmazonCloudDrivePacer)
if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.acdPacer) {
t.Errorf("calculatePace is not acdPacer")
}
p.SetPacer(GoogleDrivePacer)
if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.drivePacer) {
t.Errorf("calculatePace is not drivePacer")
}
p.SetPacer(DefaultPacer)
if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.defaultPacer) {
t.Errorf("calculatePace is not defaultPacer")
}
p := New()
p.SetRetries(18)
assert.Equal(t, 18, p.retries)
}
// emptyTokens empties the pacer of all its tokens
@@ -200,7 +104,7 @@ func waitForPace(p *Pacer, duration time.Duration) (when time.Time) {
}
func TestBeginCall(t *testing.T) {
p := New().SetMaxConnections(10).SetMinSleep(1 * time.Millisecond)
p := New(MaxConnectionsOption(10), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond))))
emptyTokens(p)
go p.beginCall()
if !waitForPace(p, 10*time.Millisecond).IsZero() {
@@ -223,7 +127,7 @@ func TestBeginCall(t *testing.T) {
}
func TestBeginCallZeroConnections(t *testing.T) {
p := New().SetMaxConnections(0).SetMinSleep(1 * time.Millisecond)
p := New(MaxConnectionsOption(0), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond))))
emptyTokens(p)
go p.beginCall()
if !waitForPace(p, 10*time.Millisecond).IsZero() {
@@ -241,155 +145,143 @@ func TestBeginCallZeroConnections(t *testing.T) {
}
func TestDefaultPacer(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second).SetDecayConstant(2)
c := NewDefault(MinSleep(1*time.Millisecond), MaxSleep(1*time.Second), DecayConstant(2))
for _, test := range []struct {
in time.Duration
retry bool
state State
want time.Duration
}{
{time.Millisecond, true, 2 * time.Millisecond},
{time.Second, true, time.Second},
{(3 * time.Second) / 4, true, time.Second},
{time.Second, false, 750 * time.Millisecond},
{1000 * time.Microsecond, false, time.Millisecond},
{1200 * time.Microsecond, false, time.Millisecond},
{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 2 * time.Millisecond},
{State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 1 * time.Second},
{State{SleepTime: (3 * time.Second) / 4, ConsecutiveRetries: 1}, 1 * time.Second},
{State{SleepTime: 1 * time.Second}, 750 * time.Millisecond},
{State{SleepTime: 1000 * time.Microsecond}, 1 * time.Millisecond},
{State{SleepTime: 1200 * time.Microsecond}, 1 * time.Millisecond},
} {
p.sleepTime = test.in
p.defaultPacer(test.retry)
got := p.sleepTime
if got != test.want {
t.Errorf("bad sleep want %v got %v", test.want, got)
}
got := c.Calculate(test.state)
assert.Equal(t, test.want, got, "test: %+v", test)
}
}
func TestAmazonCloudDrivePacer(t *testing.T) {
-    p := New().SetMinSleep(time.Millisecond).SetPacer(AmazonCloudDrivePacer).SetMaxSleep(time.Second).SetDecayConstant(2)
+    c := NewAmazonCloudDrive(MinSleep(1 * time.Millisecond))
    // Do lots of times because of the random number!
    for _, test := range []struct {
-        in                 time.Duration
-        consecutiveRetries int
-        retry              bool
-        want               time.Duration
+        state State
+        want  time.Duration
    }{
-        {time.Millisecond, 0, true, time.Millisecond},
-        {10 * time.Millisecond, 0, true, time.Millisecond},
-        {1 * time.Second, 1, true, 500 * time.Millisecond},
-        {1 * time.Second, 2, true, 1 * time.Second},
-        {1 * time.Second, 3, true, 2 * time.Second},
-        {1 * time.Second, 4, true, 4 * time.Second},
-        {1 * time.Second, 5, true, 8 * time.Second},
-        {1 * time.Second, 6, true, 16 * time.Second},
-        {1 * time.Second, 7, true, 32 * time.Second},
-        {1 * time.Second, 8, true, 64 * time.Second},
-        {1 * time.Second, 9, true, 128 * time.Second},
-        {1 * time.Second, 10, true, 128 * time.Second},
-        {1 * time.Second, 11, true, 128 * time.Second},
+        {State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 0}, 1 * time.Millisecond},
+        {State{SleepTime: 10 * time.Millisecond, ConsecutiveRetries: 0}, 1 * time.Millisecond},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 500 * time.Millisecond},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 2}, 1 * time.Second},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 3}, 2 * time.Second},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 4}, 4 * time.Second},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 5}, 8 * time.Second},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 6}, 16 * time.Second},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 7}, 32 * time.Second},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 8}, 64 * time.Second},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 9}, 128 * time.Second},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 10}, 128 * time.Second},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 11}, 128 * time.Second},
    } {
        const n = 1000
        var sum time.Duration
        // measure average time over n cycles
        for i := 0; i < n; i++ {
-            p.sleepTime = test.in
-            p.consecutiveRetries = test.consecutiveRetries
-            p.acdPacer(test.retry)
-            sum += p.sleepTime
+            sum += c.Calculate(test.state)
        }
        got := sum / n
        //t.Logf("%+v: got = %v", test, got)
-        if got < (test.want*9)/10 || got > (test.want*11)/10 {
-            t.Fatalf("%+v: bad sleep want %v+/-10%% got %v", test, test.want, got)
-        }
+        assert.False(t, got < (test.want*9)/10 || got > (test.want*11)/10, "test: %+v", test)
    }
}
func TestGoogleDrivePacer(t *testing.T) {
-    p := New().SetMinSleep(time.Millisecond).SetPacer(GoogleDrivePacer).SetMaxSleep(time.Second).SetDecayConstant(2)
    // Do lots of times because of the random number!
    for _, test := range []struct {
-        in                 time.Duration
-        consecutiveRetries int
-        retry              bool
-        want               time.Duration
+        state State
+        want  time.Duration
    }{
-        {time.Millisecond, 0, true, time.Millisecond},
-        {10 * time.Millisecond, 0, true, time.Millisecond},
-        {1 * time.Second, 1, true, 1*time.Second + 500*time.Millisecond},
-        {1 * time.Second, 2, true, 2*time.Second + 500*time.Millisecond},
-        {1 * time.Second, 3, true, 4*time.Second + 500*time.Millisecond},
-        {1 * time.Second, 4, true, 8*time.Second + 500*time.Millisecond},
-        {1 * time.Second, 5, true, 16*time.Second + 500*time.Millisecond},
-        {1 * time.Second, 6, true, 16*time.Second + 500*time.Millisecond},
-        {1 * time.Second, 7, true, 16*time.Second + 500*time.Millisecond},
+        {State{SleepTime: 1 * time.Millisecond}, 0},
+        {State{SleepTime: 10 * time.Millisecond}, 0},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 1*time.Second + 500*time.Millisecond},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 2}, 2*time.Second + 500*time.Millisecond},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 3}, 4*time.Second + 500*time.Millisecond},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 4}, 8*time.Second + 500*time.Millisecond},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 5}, 16*time.Second + 500*time.Millisecond},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 6}, 16*time.Second + 500*time.Millisecond},
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 7}, 16*time.Second + 500*time.Millisecond},
    } {
        const n = 1000
        var sum time.Duration
        // measure average time over n cycles
        for i := 0; i < n; i++ {
-            p.sleepTime = test.in
-            p.consecutiveRetries = test.consecutiveRetries
-            p.drivePacer(test.retry)
-            sum += p.sleepTime
+            c := NewGoogleDrive(MinSleep(1 * time.Millisecond))
+            sum += c.Calculate(test.state)
        }
        got := sum / n
        //t.Logf("%+v: got = %v", test, got)
-        if got < (test.want*9)/10 || got > (test.want*11)/10 {
-            t.Fatalf("%+v: bad sleep want %v+/-10%% got %v", test, test.want, got)
-        }
+        assert.False(t, got < (test.want*9)/10 || got > (test.want*11)/10, "test: %+v, got: %v", test, got)
    }
+
+    const minSleep = 2 * time.Millisecond
+    for _, test := range []struct {
+        calls int
+        want  int
+    }{
+        {1, 0},
+        {9, 0},
+        {10, 0},
+        {11, 1},
+        {12, 2},
+    } {
+        c := NewGoogleDrive(MinSleep(minSleep), Burst(10))
+        count := 0
+        for i := 0; i < test.calls; i++ {
+            sleep := c.Calculate(State{})
+            if sleep != 0 {
+                count++
+            }
+        }
+        assert.Equalf(t, test.want, count, "test: %+v, got: %v", test, count)
+    }
}
func TestS3Pacer(t *testing.T) {
-    p := New().SetMinSleep(10 * time.Millisecond).SetPacer(S3Pacer).SetMaxSleep(time.Second).SetDecayConstant(2)
+    c := NewS3(MinSleep(10*time.Millisecond), MaxSleep(1*time.Second), DecayConstant(2))
    for _, test := range []struct {
-        in    time.Duration
-        retry bool
+        state State
        want  time.Duration
    }{
-        {0, true, 10 * time.Millisecond}, //Things were going ok, we failed once, back off to minSleep
-        {10 * time.Millisecond, true, 20 * time.Millisecond}, //Another fail, double the backoff
-        {10 * time.Millisecond, false, 0}, //Things start going ok when we're at minSleep; should result in no sleep
-        {12 * time.Millisecond, false, 0}, //*near* minsleep and going ok, decay would take below minSleep, should go to 0
-        {0, false, 0}, //Things have been going ok; not retrying should keep sleep at 0
-        {time.Second, true, time.Second}, //Check maxSleep is enforced
-        {(3 * time.Second) / 4, true, time.Second}, //Check attack heading to maxSleep doesn't exceed maxSleep
-        {time.Second, false, 750 * time.Millisecond}, //Check decay from maxSleep
-        {48 * time.Millisecond, false, 36 * time.Millisecond}, //Check simple decay above minSleep
+        {State{SleepTime: 0, ConsecutiveRetries: 1}, 10 * time.Millisecond}, //Things were going ok, we failed once, back off to minSleep
+        {State{SleepTime: 10 * time.Millisecond, ConsecutiveRetries: 1}, 20 * time.Millisecond}, //Another fail, double the backoff
+        {State{SleepTime: 10 * time.Millisecond}, 0}, //Things start going ok when we're at minSleep; should result in no sleep
+        {State{SleepTime: 12 * time.Millisecond}, 0}, //*near* minsleep and going ok, decay would take below minSleep, should go to 0
+        {State{SleepTime: 0}, 0}, //Things have been going ok; not retrying should keep sleep at 0
+        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 1 * time.Second}, //Check maxSleep is enforced
+        {State{SleepTime: (3 * time.Second) / 4, ConsecutiveRetries: 1}, 1 * time.Second}, //Check attack heading to maxSleep doesn't exceed maxSleep
+        {State{SleepTime: 1 * time.Second}, 750 * time.Millisecond}, //Check decay from maxSleep
+        {State{SleepTime: 48 * time.Millisecond}, 36 * time.Millisecond}, //Check simple decay above minSleep
    } {
-        p.sleepTime = test.in
-        p.s3Pacer(test.retry)
-        got := p.sleepTime
-        if got != test.want {
-            t.Errorf("bad sleep for %v with retry %v: want %v got %v", test.in, test.retry, test.want, got)
-        }
+        got := c.Calculate(test.state)
+        assert.Equal(t, test.want, got, "test: %+v", test)
    }
}
func TestEndCall(t *testing.T) {
-    p := New().SetMaxConnections(5)
+    p := New(MaxConnectionsOption(5))
    emptyTokens(p)
-    p.consecutiveRetries = 1
-    p.endCall(true)
-    if len(p.connTokens) != 1 {
-        t.Errorf("Expecting 1 token")
-    }
-    if p.consecutiveRetries != 2 {
-        t.Errorf("Bad consecutive retries")
-    }
+    p.state.ConsecutiveRetries = 1
+    p.endCall(true, nil)
+    assert.Equal(t, 1, len(p.connTokens))
+    assert.Equal(t, 2, p.state.ConsecutiveRetries)
}
func TestEndCallZeroConnections(t *testing.T) {
-    p := New().SetMaxConnections(0)
+    p := New(MaxConnectionsOption(0))
    emptyTokens(p)
-    p.consecutiveRetries = 1
-    p.endCall(false)
-    if len(p.connTokens) != 0 {
-        t.Errorf("Expecting 0 token")
-    }
-    if p.consecutiveRetries != 0 {
-        t.Errorf("Bad consecutive retries")
-    }
+    p.state.ConsecutiveRetries = 1
+    p.endCall(false, nil)
+    assert.Equal(t, 0, len(p.connTokens))
+    assert.Equal(t, 0, p.state.ConsecutiveRetries)
}
var errFoo = errors.New("foo")
@@ -397,67 +289,79 @@ var errFoo = errors.New("foo")
type dummyPaced struct {
    retry  bool
    called int
+    wait   *sync.Cond
}

func (dp *dummyPaced) fn() (bool, error) {
-    dp.called++
+    if dp.wait != nil {
+        dp.wait.L.Lock()
+        dp.called++
+        dp.wait.Wait()
+        dp.wait.L.Unlock()
+    } else {
+        dp.called++
+    }
    return dp.retry, errFoo
}
-func Test_callNoRetry(t *testing.T) {
-    p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond)
+func TestCallFixed(t *testing.T) {
+    p := New(CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))
    dp := &dummyPaced{retry: false}
    err := p.call(dp.fn, 10)
-    if dp.called != 1 {
-        t.Errorf("called want %d got %d", 1, dp.called)
-    }
-    if err != errFoo {
-        t.Errorf("err want %v got %v", errFoo, err)
-    }
+    assert.Equal(t, 1, dp.called)
+    assert.Equal(t, errFoo, err)
}
func Test_callRetry(t *testing.T) {
-    p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond)
+    p := New(CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))
    dp := &dummyPaced{retry: true}
    err := p.call(dp.fn, 10)
-    if dp.called != 10 {
-        t.Errorf("called want %d got %d", 10, dp.called)
-    }
-    if err == errFoo {
-        t.Errorf("err didn't want %v got %v", errFoo, err)
-    }
-    _, ok := err.(fserrors.Retrier)
-    if !ok {
-        t.Errorf("didn't return a retry error")
-    }
+    assert.Equal(t, 10, dp.called)
+    assert.Equal(t, errFoo, err)
}
func TestCall(t *testing.T) {
-    p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond).SetRetries(20)
+    p := New(RetriesOption(20), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))
    dp := &dummyPaced{retry: true}
    err := p.Call(dp.fn)
-    if dp.called != 20 {
-        t.Errorf("called want %d got %d", 20, dp.called)
-    }
-    _, ok := err.(fserrors.Retrier)
-    if !ok {
-        t.Errorf("didn't return a retry error")
-    }
+    assert.Equal(t, 20, dp.called)
+    assert.Equal(t, errFoo, err)
}
-func TestCallNoRetry(t *testing.T) {
-    p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond).SetRetries(20)
+func TestCallParallel(t *testing.T) {
+    p := New(MaxConnectionsOption(3), RetriesOption(1), CalculatorOption(NewDefault(MinSleep(100*time.Microsecond), MaxSleep(1*time.Millisecond))))
-    dp := &dummyPaced{retry: true}
-    err := p.CallNoRetry(dp.fn)
-    if dp.called != 1 {
-        t.Errorf("called want %d got %d", 1, dp.called)
-    }
-    _, ok := err.(fserrors.Retrier)
-    if !ok {
-        t.Errorf("didn't return a retry error")
-    }
+    wait := sync.NewCond(&sync.Mutex{})
+    funcs := make([]*dummyPaced, 5)
+    for i := range funcs {
+        dp := &dummyPaced{wait: wait}
+        funcs[i] = dp
+        go func() {
+            assert.Equal(t, errFoo, p.CallNoRetry(dp.fn))
+        }()
+    }
+    time.Sleep(10 * time.Millisecond)
+    called := 0
+    wait.L.Lock()
+    for _, dp := range funcs {
+        called += dp.called
+    }
+    wait.L.Unlock()
+    assert.Equal(t, 3, called)
+    wait.Broadcast()
+    time.Sleep(20 * time.Millisecond)
+    called = 0
+    wait.L.Lock()
+    for _, dp := range funcs {
+        called += dp.called
+    }
+    wait.L.Unlock()
+    assert.Equal(t, 5, called)
+    wait.Broadcast()
}

lib/pacer/pacers.go Normal file

@@ -0,0 +1,326 @@
package pacer

import (
    "math/rand"
    "time"

    "golang.org/x/time/rate"
)
type (
// MinSleep configures the minimum sleep time of a Calculator
MinSleep time.Duration
// MaxSleep configures the maximum sleep time of a Calculator
MaxSleep time.Duration
// DecayConstant configures the decay constant time of a Calculator
DecayConstant uint
// AttackConstant configures the attack constant of a Calculator
AttackConstant uint
// Burst configures the number of API calls to allow without sleeping
Burst int
)
// Default is a truncated exponential attack and decay.
//
// On retries the sleep time is doubled, on non errors the sleep time decays
// according to the decay constant as set with DecayConstant.
//
// The sleep never goes below that set with MinSleep or above that set
// with MaxSleep.
type Default struct {
minSleep time.Duration // minimum sleep time
maxSleep time.Duration // maximum sleep time
decayConstant uint // decay constant
attackConstant uint // attack constant
}
// DefaultOption is the interface implemented by all options for the Default Calculator
type DefaultOption interface {
ApplyDefault(*Default)
}
// NewDefault creates a Calculator used by Pacer as the default.
func NewDefault(opts ...DefaultOption) *Default {
c := &Default{
minSleep: 10 * time.Millisecond,
maxSleep: 2 * time.Second,
decayConstant: 2,
attackConstant: 1,
}
c.Update(opts...)
return c
}
// Update applies the Calculator options.
func (c *Default) Update(opts ...DefaultOption) {
for _, opt := range opts {
opt.ApplyDefault(c)
}
}
// ApplyDefault updates the value on the Calculator
func (o MinSleep) ApplyDefault(c *Default) {
c.minSleep = time.Duration(o)
}
// ApplyDefault updates the value on the Calculator
func (o MaxSleep) ApplyDefault(c *Default) {
c.maxSleep = time.Duration(o)
}
// ApplyDefault updates the value on the Calculator
func (o DecayConstant) ApplyDefault(c *Default) {
c.decayConstant = uint(o)
}
// ApplyDefault updates the value on the Calculator
func (o AttackConstant) ApplyDefault(c *Default) {
c.attackConstant = uint(o)
}
// Calculate takes the current Pacer state and returns the wait time until the next try.
func (c *Default) Calculate(state State) time.Duration {
if t, ok := IsRetryAfter(state.LastError); ok {
if t < c.minSleep {
return c.minSleep
}
return t
}
if state.ConsecutiveRetries > 0 {
sleepTime := c.maxSleep
if c.attackConstant != 0 {
sleepTime = (state.SleepTime << c.attackConstant) / ((1 << c.attackConstant) - 1)
}
if sleepTime > c.maxSleep {
sleepTime = c.maxSleep
}
return sleepTime
}
sleepTime := (state.SleepTime<<c.decayConstant - state.SleepTime) >> c.decayConstant
if sleepTime < c.minSleep {
sleepTime = c.minSleep
}
return sleepTime
}
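To make the attack/decay arithmetic concrete, here is a small, hypothetical driver for the Default calculator (the import path is an assumption; adjust it to wherever lib/pacer lives in your tree):

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/pacer" // assumed import path for lib/pacer
)

func main() {
	c := pacer.NewDefault(pacer.MinSleep(10*time.Millisecond), pacer.MaxSleep(2*time.Second), pacer.DecayConstant(2))

	// Consecutive retries: with the default attackConstant of 1 the sleep
	// doubles each time until it is capped at MaxSleep.
	sleep := 10 * time.Millisecond
	for retries := 1; retries <= 9; retries++ {
		sleep = c.Calculate(pacer.State{SleepTime: sleep, ConsecutiveRetries: retries})
		fmt.Printf("retry %d: sleep %v\n", retries, sleep) // 20ms, 40ms, ... capped at 2s
	}

	// A success (ConsecutiveRetries == 0) decays the sleep by 3/4 per call
	// (decayConstant 2) until it reaches MinSleep again.
	sleep = c.Calculate(pacer.State{SleepTime: sleep})
	fmt.Printf("after success: sleep %v\n", sleep) // 1.5s
}
```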
// AmazonCloudDrive is a specialized pacer for Amazon Drive
//
// It implements a truncated exponential backoff strategy with randomization.
// Normally operations are paced at the interval set with MinSleep. On errors
// the sleep timer is set to 0..2**retries seconds.
//
// See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
type AmazonCloudDrive struct {
minSleep time.Duration // minimum sleep time
}
// AmazonCloudDriveOption is the interface implemented by all options for the AmazonCloudDrive Calculator
type AmazonCloudDriveOption interface {
ApplyAmazonCloudDrive(*AmazonCloudDrive)
}
// NewAmazonCloudDrive returns a new AmazonCloudDrive Calculator with default values
func NewAmazonCloudDrive(opts ...AmazonCloudDriveOption) *AmazonCloudDrive {
c := &AmazonCloudDrive{
minSleep: 10 * time.Millisecond,
}
c.Update(opts...)
return c
}
// Update applies the Calculator options.
func (c *AmazonCloudDrive) Update(opts ...AmazonCloudDriveOption) {
for _, opt := range opts {
opt.ApplyAmazonCloudDrive(c)
}
}
// ApplyAmazonCloudDrive updates the value on the Calculator
func (o MinSleep) ApplyAmazonCloudDrive(c *AmazonCloudDrive) {
c.minSleep = time.Duration(o)
}
// Calculate takes the current Pacer state and returns the wait time until the next try.
func (c *AmazonCloudDrive) Calculate(state State) time.Duration {
if t, ok := IsRetryAfter(state.LastError); ok {
if t < c.minSleep {
return c.minSleep
}
return t
}
consecutiveRetries := state.ConsecutiveRetries
if consecutiveRetries == 0 {
return c.minSleep
}
if consecutiveRetries > 9 {
consecutiveRetries = 9
}
// consecutiveRetries starts at 1 so
// maxSleep is 2**(consecutiveRetries-1) seconds
maxSleep := time.Second << uint(consecutiveRetries-1)
// actual sleep is random from 0..maxSleep
sleepTime := time.Duration(rand.Int63n(int64(maxSleep)))
if sleepTime < c.minSleep {
sleepTime = c.minSleep
}
return sleepTime
}
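Because the backoff is randomized over 0..2**(retries-1) seconds, individual results vary but the mean converges, as this hypothetical check illustrates (same assumed import path as above):

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/pacer" // assumed import path for lib/pacer
)

func main() {
	c := pacer.NewAmazonCloudDrive(pacer.MinSleep(time.Millisecond))

	// With 3 consecutive retries the sleep is uniform on 0..4s, so the
	// average over many samples should be close to 2s.
	var sum time.Duration
	const n = 10000
	for i := 0; i < n; i++ {
		sum += c.Calculate(pacer.State{ConsecutiveRetries: 3})
	}
	fmt.Printf("mean sleep: %v (expect ~2s)\n", sum/n)
}
```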
// GoogleDrive is a specialized pacer for Google Drive
//
// It implements a truncated exponential backoff strategy with randomization.
// Normally operations are paced at the interval set with MinSleep. On errors
// the sleep timer is set to (2 ^ n) + random_number_milliseconds seconds.
//
// See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
type GoogleDrive struct {
minSleep time.Duration // minimum sleep time
burst int // number of requests without sleeping
limiter *rate.Limiter // rate limiter for the minSleep
}
// GoogleDriveOption is the interface implemented by all options for the GoogleDrive Calculator
type GoogleDriveOption interface {
ApplyGoogleDrive(*GoogleDrive)
}
// NewGoogleDrive returns a new GoogleDrive Calculator with default values
func NewGoogleDrive(opts ...GoogleDriveOption) *GoogleDrive {
c := &GoogleDrive{
minSleep: 10 * time.Millisecond,
burst: 1,
}
c.Update(opts...)
return c
}
// Update applies the Calculator options.
func (c *GoogleDrive) Update(opts ...GoogleDriveOption) {
for _, opt := range opts {
opt.ApplyGoogleDrive(c)
}
if c.burst <= 0 {
c.burst = 1
}
c.limiter = rate.NewLimiter(rate.Every(c.minSleep), c.burst)
}
// ApplyGoogleDrive updates the value on the Calculator
func (o MinSleep) ApplyGoogleDrive(c *GoogleDrive) {
c.minSleep = time.Duration(o)
}
// ApplyGoogleDrive updates the value on the Calculator
func (o Burst) ApplyGoogleDrive(c *GoogleDrive) {
c.burst = int(o)
}
// Calculate takes the current Pacer state and returns the wait time until the next try.
func (c *GoogleDrive) Calculate(state State) time.Duration {
if t, ok := IsRetryAfter(state.LastError); ok {
if t < c.minSleep {
return c.minSleep
}
return t
}
consecutiveRetries := state.ConsecutiveRetries
if consecutiveRetries == 0 {
return c.limiter.Reserve().Delay()
}
if consecutiveRetries > 5 {
consecutiveRetries = 5
}
// consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
// maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
return time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Second)))
}
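The rate.Limiter gives the happy path token-bucket semantics: the first Burst calls sleep not at all, after which Calculate starts returning delays of roughly MinSleep. A hypothetical demonstration (same assumed import path):

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/pacer" // assumed import path for lib/pacer
)

func main() {
	c := pacer.NewGoogleDrive(pacer.MinSleep(2*time.Millisecond), pacer.Burst(10))

	// 12 back-to-back successful calls: the first 10 fit in the burst and
	// sleep 0, the last 2 are delayed by the limiter.
	delayed := 0
	for i := 0; i < 12; i++ {
		if c.Calculate(pacer.State{}) != 0 {
			delayed++
		}
	}
	fmt.Printf("delayed calls: %d (expect 2)\n", delayed)
}
```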
// S3 implements a pacer compatible with our expectations of S3, where it tries to not
// delay at all between successful calls, but backs off in the default fashion in response
// to any errors.
// The assumption is that errors should be exceedingly rare (S3 seems to have largely solved
// the sort of stability questions rclone is likely to run into), and in the happy case
// it can handle calls with no delays between them.
//
// Basically the Default calculator, but with some handling of sleepTime going to/from 0ms
type S3 struct {
minSleep time.Duration // minimum sleep time
maxSleep time.Duration // maximum sleep time
decayConstant uint // decay constant
attackConstant uint // attack constant
}
// S3Option is the interface implemented by all options for the S3 Calculator
type S3Option interface {
ApplyS3(*S3)
}
// NewS3 returns a new S3 Calculator with default values
func NewS3(opts ...S3Option) *S3 {
c := &S3{
maxSleep: 2 * time.Second,
decayConstant: 2,
attackConstant: 1,
}
c.Update(opts...)
return c
}
// Update applies the Calculator options.
func (c *S3) Update(opts ...S3Option) {
for _, opt := range opts {
opt.ApplyS3(c)
}
}
// ApplyS3 updates the value on the Calculator
func (o MaxSleep) ApplyS3(c *S3) {
c.maxSleep = time.Duration(o)
}
// ApplyS3 updates the value on the Calculator
func (o MinSleep) ApplyS3(c *S3) {
c.minSleep = time.Duration(o)
}
// ApplyS3 updates the value on the Calculator
func (o DecayConstant) ApplyS3(c *S3) {
c.decayConstant = uint(o)
}
// ApplyS3 updates the value on the Calculator
func (o AttackConstant) ApplyS3(c *S3) {
c.attackConstant = uint(o)
}
// Calculate takes the current Pacer state and returns the wait time until the next try.
func (c *S3) Calculate(state State) time.Duration {
if t, ok := IsRetryAfter(state.LastError); ok {
if t < c.minSleep {
return c.minSleep
}
return t
}
if state.ConsecutiveRetries > 0 {
if c.attackConstant == 0 {
return c.maxSleep
}
if state.SleepTime == 0 {
return c.minSleep
}
sleepTime := (state.SleepTime << c.attackConstant) / ((1 << c.attackConstant) - 1)
if sleepTime > c.maxSleep {
sleepTime = c.maxSleep
}
return sleepTime
}
sleepTime := (state.SleepTime<<c.decayConstant - state.SleepTime) >> c.decayConstant
if sleepTime < c.minSleep {
sleepTime = 0
}
return sleepTime
}
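The net effect is zero added latency while calls succeed, with the usual doubling once errors appear, e.g. (same assumed import path):

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/pacer" // assumed import path for lib/pacer
)

func main() {
	c := pacer.NewS3(pacer.MinSleep(10 * time.Millisecond))

	// Happy path: no sleep at all between successful calls.
	fmt.Println(c.Calculate(pacer.State{})) // 0s

	// First failure jumps from 0 straight to MinSleep, then doubles.
	s := c.Calculate(pacer.State{ConsecutiveRetries: 1})
	fmt.Println(s) // 10ms
	fmt.Println(c.Calculate(pacer.State{SleepTime: s, ConsecutiveRetries: 1})) // 20ms
}
```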


@@ -1,363 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.


@@ -1,30 +0,0 @@
# cleanhttp
Functions for accessing "clean" Go http.Client values
-------------
The Go standard library contains a default `http.Client` called
`http.DefaultClient`. It is a common idiom in Go code to start with
`http.DefaultClient` and tweak it as necessary, and in fact, this is
encouraged; from the `http` package documentation:
> The Client's Transport typically has internal state (cached TCP connections),
so Clients should be reused instead of created as needed. Clients are safe for
concurrent use by multiple goroutines.
Unfortunately, this is a shared value, and it is not uncommon for libraries to
assume that they are free to modify it at will. With enough dependencies, it
can be very easy to encounter strange problems and race conditions due to
manipulation of this shared value across libraries and goroutines (clients are
safe for concurrent use, but writing values to the client struct itself is not
protected).
Making things worse is the fact that a bare `http.Client` will use a default
`http.Transport` called `http.DefaultTransport`, which is another global value
that behaves the same way. So it is not simply enough to replace
`http.DefaultClient` with `&http.Client{}`.
This repository provides some simple functions to get a "clean" `http.Client`
-- one that uses the same default values as the Go standard library, but
returns a client that does not share any state with other clients.
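A minimal sketch of the intended usage, built on the constructors from the files below (the URL is a placeholder):

```go
package main

import (
	"fmt"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	// Non-shared transport with keepalives disabled: suitable for one-shot
	// requests, and immune to other packages mutating http.DefaultClient.
	c := cleanhttp.DefaultClient()
	resp, err := c.Get("https://example.com/") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```

For repeated requests to the same host(s), DefaultPooledClient below is the better fit, since it keeps a connection pool.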


@@ -1,57 +0,0 @@
package cleanhttp
import (
"net"
"net/http"
"runtime"
"time"
)
// DefaultTransport returns a new http.Transport with similar default values to
// http.DefaultTransport, but with idle connections and keepalives disabled.
func DefaultTransport() *http.Transport {
transport := DefaultPooledTransport()
transport.DisableKeepAlives = true
transport.MaxIdleConnsPerHost = -1
return transport
}
// DefaultPooledTransport returns a new http.Transport with similar default
// values to http.DefaultTransport. Do not use this for transient transports as
// it can leak file descriptors over time. Only use this for transports that
// will be re-used for the same host(s).
func DefaultPooledTransport() *http.Transport {
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
}
return transport
}
// DefaultClient returns a new http.Client with similar default values to
// http.Client, but with a non-shared Transport, idle connections disabled, and
// keepalives disabled.
func DefaultClient() *http.Client {
return &http.Client{
Transport: DefaultTransport(),
}
}
// DefaultPooledClient returns a new http.Client with similar default values to
// http.Client, but with a shared Transport. Do not use this function for
// transient clients as it can leak file descriptors over time. Only use this
// for clients that will be re-used for the same host(s).
func DefaultPooledClient() *http.Client {
return &http.Client{
Transport: DefaultPooledTransport(),
}
}


@@ -1,20 +0,0 @@
// Package cleanhttp offers convenience utilities for acquiring "clean"
// http.Transport and http.Client structs.
//
// Values set on http.DefaultClient and http.DefaultTransport affect all
// callers. This can have detrimental effects, especially in TLS contexts,
// where client or root certificates set to talk to multiple endpoints can end
// up displacing each other, leading to hard-to-debug issues. This package
// provides non-shared http.Client and http.Transport structs to ensure that
// the configuration will not be overwritten by other parts of the application
// or dependencies.
//
// The DefaultClient and DefaultTransport functions disable idle connections
// and keepalives. Without ensuring that idle connections are closed before
// garbage collection, short-term clients/transports can leak file descriptors,
// eventually leading to "too many open files" errors. If you will be
// connecting to the same hosts repeatedly from the same client, you can use
// DefaultPooledClient to receive a client that has connection pooling
// semantics similar to http.DefaultClient.
//
package cleanhttp


@@ -1 +0,0 @@
module github.com/hashicorp/go-cleanhttp


@@ -1,43 +0,0 @@
package cleanhttp
import (
"net/http"
"strings"
"unicode"
)
// HandlerInput provides input options to cleanhttp's handlers
type HandlerInput struct {
ErrStatus int
}
// PrintablePathCheckHandler is a middleware that ensures the request path
// contains only printable runes.
func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
// Nil-check on input to make it optional
if input == nil {
input = &HandlerInput{
ErrStatus: http.StatusBadRequest,
}
}
// Default to http.StatusBadRequest on error
if input.ErrStatus == 0 {
input.ErrStatus = http.StatusBadRequest
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Check URL path for non-printable characters
idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
return !unicode.IsPrint(c)
})
if idx != -1 {
w.WriteHeader(input.ErrStatus)
return
}
next.ServeHTTP(w, r)
return
})
}
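A hypothetical server wiring for the middleware above; passing nil for *HandlerInput falls back to 400 Bad Request:

```go
package main

import (
	"net/http"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	// Requests whose path contains non-printable runes get a 400 before
	// they ever reach the mux.
	handler := cleanhttp.PrintablePathCheckHandler(mux, nil)
	_ = http.ListenAndServe(":8080", handler) // port is a placeholder
}
```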


@@ -1,3 +0,0 @@
.idea/
*.iml
*.test


@@ -1,12 +0,0 @@
sudo: false
language: go
go:
- 1.8.1
branches:
only:
- master
script: make updatedeps test



@@ -1,11 +0,0 @@
default: test
test:
go vet ./...
go test -race ./...
updatedeps:
go get -f -t -u ./...
go get -f -u ./...
.PHONY: default test updatedeps


@@ -1,46 +0,0 @@
go-retryablehttp
================
[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis]
[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
[travis]: http://travis-ci.org/hashicorp/go-retryablehttp
[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp
The `retryablehttp` package provides a familiar HTTP client interface with
automatic retries and exponential backoff. It is a thin wrapper over the
standard `net/http` client library and exposes nearly the same public API. This
makes `retryablehttp` very easy to drop into existing programs.
`retryablehttp` performs automatic retries under certain conditions. Mainly, if
an error is returned by the client (connection errors, etc.), or if a 500-range
response code is received (except 501), then a retry is invoked after a wait
period. Otherwise, the response is returned and left to the caller to
interpret.
The main difference from `net/http` is that requests which take a request body
(POST/PUT et al.) can have the body provided in a number of ways (some more or
less efficient) that allow "rewinding" the request body if the initial request
fails so that the full request can be attempted again. See the
[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more
details.
Example Use
===========
Using this library should look almost identical to what you would do with
`net/http`. The simplest example of a GET request is shown below:
```go
resp, err := retryablehttp.Get("/foo")
if err != nil {
panic(err)
}
```
The returned response object is an `*http.Response`, the same thing you would
usually get from `net/http`. If the request fails one or more times, the above
call blocks and retries with exponential backoff before returning.
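For requests with a body, a sketch along these lines should work with the version vendored here (endpoint and payload are placeholders); a raw `[]byte` body lets the client rewind and resend the body on each retry:

```go
package main

import (
	"fmt"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	body := []byte(`{"name":"example"}`) // placeholder payload
	// A raw []byte body can be re-read on every retry without copying.
	req, err := retryablehttp.NewRequest("POST", "http://127.0.0.1:8080/foo", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	client := retryablehttp.NewClient()
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```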
For more usage and examples see the
[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp).


@@ -1,528 +0,0 @@
// The retryablehttp package provides a familiar HTTP client interface with
// automatic retries and exponential backoff. It is a thin wrapper over the
// standard net/http client library and exposes nearly the same public API.
// This makes retryablehttp very easy to drop into existing programs.
//
// retryablehttp performs automatic retries under certain conditions. Mainly, if
// an error is returned by the client (connection errors, etc.), or if a 500-range
// response is received, then a retry is invoked. Otherwise, the response is
// returned and left to the caller to interpret.
//
// Requests which take a request body should provide a non-nil function
// parameter. The best choice is to provide either a function satisfying
// ReaderFunc which provides multiple io.Readers in an efficient manner, a
// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte
// slice. As it is a reference type, and we will wrap it as needed by readers,
// we can efficiently re-use the request body without needing to copy it. If an
// io.Reader (such as a *bytes.Reader) is provided, the full body will be read
// prior to the first request, and will be efficiently re-used for any retries.
// ReadSeeker can be used, but some users have observed occasional data races
// between the net/http library and the Seek functionality of some
// implementations of ReadSeeker, so should be avoided if possible.
package retryablehttp
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"math/rand"
"net/http"
"net/url"
"os"
"strings"
"time"
cleanhttp "github.com/hashicorp/go-cleanhttp"
)
var (
// Default retry configuration
defaultRetryWaitMin = 1 * time.Second
defaultRetryWaitMax = 30 * time.Second
defaultRetryMax = 4
// defaultClient is used for performing requests without explicitly making
// a new client. It is purposely private to avoid modifications.
defaultClient = NewClient()
// We need to consume response bodies to maintain http connections, but
// limit the size we consume to respReadLimit.
respReadLimit = int64(4096)
)
// ReaderFunc is the type of function that can be given natively to NewRequest
type ReaderFunc func() (io.Reader, error)
// LenReader is an interface implemented by many in-memory io.Readers. Used
// for automatically sending the right Content-Length header when possible.
type LenReader interface {
Len() int
}
// Request wraps the metadata needed to create HTTP requests.
type Request struct {
// body is a seekable reader over the request body payload. This is
// used to rewind the request data in between retries.
body ReaderFunc
// Embed an HTTP request directly. This makes a *Request act exactly
// like an *http.Request so that all meta methods are supported.
*http.Request
}
// WithContext returns the wrapped Request with a shallow copy of the underlying
// *http.Request whose context is changed to ctx. The provided ctx must be non-nil.
func (r *Request) WithContext(ctx context.Context) *Request {
r.Request = r.Request.WithContext(ctx)
return r
}
// BodyBytes allows accessing the request body. It is an analogue to
// http.Request's Body variable, but it returns a copy of the underlying data
// rather than consuming it.
//
// This function is not thread-safe; do not call it at the same time as another
// call, or at the same time this request is being used with Client.Do.
func (r *Request) BodyBytes() ([]byte, error) {
if r.body == nil {
return nil, nil
}
body, err := r.body()
if err != nil {
return nil, err
}
buf := new(bytes.Buffer)
_, err = buf.ReadFrom(body)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// NewRequest creates a new wrapped request.
func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
var err error
var body ReaderFunc
var contentLength int64
if rawBody != nil {
switch rawBody.(type) {
// If they gave us a function already, great! Use it.
case ReaderFunc:
body = rawBody.(ReaderFunc)
tmp, err := body()
if err != nil {
return nil, err
}
if lr, ok := tmp.(LenReader); ok {
contentLength = int64(lr.Len())
}
if c, ok := tmp.(io.Closer); ok {
c.Close()
}
case func() (io.Reader, error):
body = rawBody.(func() (io.Reader, error))
tmp, err := body()
if err != nil {
return nil, err
}
if lr, ok := tmp.(LenReader); ok {
contentLength = int64(lr.Len())
}
if c, ok := tmp.(io.Closer); ok {
c.Close()
}
// If a regular byte slice, we can read it over and over via new
// readers
case []byte:
buf := rawBody.([]byte)
body = func() (io.Reader, error) {
return bytes.NewReader(buf), nil
}
contentLength = int64(len(buf))
// If a bytes.Buffer we can read the underlying byte slice over and
// over
case *bytes.Buffer:
buf := rawBody.(*bytes.Buffer)
body = func() (io.Reader, error) {
return bytes.NewReader(buf.Bytes()), nil
}
contentLength = int64(buf.Len())
// We prioritize *bytes.Reader here because we don't really want to
// deal with it seeking so want it to match here instead of the
// io.ReadSeeker case.
case *bytes.Reader:
buf, err := ioutil.ReadAll(rawBody.(*bytes.Reader))
if err != nil {
return nil, err
}
body = func() (io.Reader, error) {
return bytes.NewReader(buf), nil
}
contentLength = int64(len(buf))
// Compat case
case io.ReadSeeker:
raw := rawBody.(io.ReadSeeker)
body = func() (io.Reader, error) {
raw.Seek(0, 0)
return ioutil.NopCloser(raw), nil
}
if lr, ok := raw.(LenReader); ok {
contentLength = int64(lr.Len())
}
// Read all in so we can reset
case io.Reader:
buf, err := ioutil.ReadAll(rawBody.(io.Reader))
if err != nil {
return nil, err
}
body = func() (io.Reader, error) {
return bytes.NewReader(buf), nil
}
contentLength = int64(len(buf))
default:
return nil, fmt.Errorf("cannot handle type %T", rawBody)
}
}
httpReq, err := http.NewRequest(method, url, nil)
if err != nil {
return nil, err
}
httpReq.ContentLength = contentLength
return &Request{body, httpReq}, nil
}
// Logger is an interface that allows using loggers other than the
// standard log.Logger.
type Logger interface {
Printf(string, ...interface{})
}
// RequestLogHook allows a function to run before each retry. The HTTP
// request which will be made, and the retry number (0 for the initial
// request) are available to users. The internal logger is exposed to
// consumers.
type RequestLogHook func(Logger, *http.Request, int)
// ResponseLogHook is like RequestLogHook, but allows running a function
// on each HTTP response. This function will be invoked at the end of
// every HTTP request executed, regardless of whether a subsequent retry
// needs to be performed or not. If the response body is read or closed
// from this method, this will affect the response returned from Do().
type ResponseLogHook func(Logger, *http.Response)
// CheckRetry specifies a policy for handling retries. It is called
// following each request with the response and error values returned by
// the http.Client. If CheckRetry returns false, the Client stops retrying
// and returns the response to the caller. If CheckRetry returns an error,
// that error value is returned in lieu of the error from the request. The
// Client will close any response body when retrying, but if the retry is
// aborted it is up to the CheckResponse callback to properly close any
// response body before returning.
type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error)
// Backoff specifies a policy for how long to wait between retries.
// It is called after a failing request to determine the amount of time
// that should pass before trying again.
type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration
// ErrorHandler is called if retries are expired, containing the last status
// from the http library. If not specified, default behavior for the library is
// to close the body and return an error indicating how many tries were
// attempted. If overriding this, be sure to close the body if needed.
type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error)
// Client is used to make HTTP requests. It adds additional functionality
// like automatic retries to tolerate minor outages.
type Client struct {
HTTPClient *http.Client // Internal HTTP client.
Logger Logger // Custom logger instance.
RetryWaitMin time.Duration // Minimum time to wait
RetryWaitMax time.Duration // Maximum time to wait
RetryMax int // Maximum number of retries
// RequestLogHook allows a user-supplied function to be called
// before each retry.
RequestLogHook RequestLogHook
// ResponseLogHook allows a user-supplied function to be called
// with the response from each HTTP request executed.
ResponseLogHook ResponseLogHook
// CheckRetry specifies the policy for handling retries, and is called
// after each request. The default policy is DefaultRetryPolicy.
CheckRetry CheckRetry
// Backoff specifies the policy for how long to wait between retries
Backoff Backoff
// ErrorHandler specifies the custom error handler to use, if any
ErrorHandler ErrorHandler
}
// NewClient creates a new Client with default settings.
func NewClient() *Client {
return &Client{
HTTPClient: cleanhttp.DefaultClient(),
Logger: log.New(os.Stderr, "", log.LstdFlags),
RetryWaitMin: defaultRetryWaitMin,
RetryWaitMax: defaultRetryWaitMax,
RetryMax: defaultRetryMax,
CheckRetry: DefaultRetryPolicy,
Backoff: DefaultBackoff,
}
}
// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
// will retry on connection errors and server errors.
func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) {
// do not retry on context.Canceled or context.DeadlineExceeded
if ctx.Err() != nil {
return false, ctx.Err()
}
if err != nil {
return true, err
}
// Check the response code. We retry on 500-range responses to allow
// the server time to recover, as 500's are typically not permanent
// errors and may relate to outages on the server side. This will catch
// invalid response codes as well, like 0 and 999.
if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) {
return true, nil
}
return false, nil
}
// DefaultBackoff provides a default callback for Client.Backoff which
// will perform exponential backoff based on the attempt number and limited
// by the provided minimum and maximum durations.
func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
mult := math.Pow(2, float64(attemptNum)) * float64(min)
sleep := time.Duration(mult)
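// Clamp: if the exponential value overflowed the Duration conversion
// (the float64 round-trip no longer matches) or exceeds max, cap at max.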
if float64(sleep) != mult || sleep > max {
sleep = max
}
return sleep
}
// LinearJitterBackoff provides a callback for Client.Backoff which will
// perform linear backoff based on the attempt number and with jitter to
// prevent a thundering herd.
//
// min and max here are *not* absolute values. The number to be multiplied by
// the attempt number will be chosen at random from between them, thus they are
// bounding the jitter.
//
// For instance:
// * To get strictly linear backoff of one second increasing each retry, set
// both to one second (1s, 2s, 3s, 4s, ...)
// * To get a small amount of jitter centered around one second increasing each
// retry, set to around one second, such as a min of 800ms and max of 1200ms
// (892ms, 2102ms, 2945ms, 4312ms, ...)
// * To get extreme jitter, set to a very wide spread, such as a min of 100ms
// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...)
func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
// attemptNum always starts at zero but we want to start at 1 for multiplication
attemptNum++
if max <= min {
// The range is empty or inverted, so there is no jitter to pick; just
// return min * attemptNum
return min * time.Duration(attemptNum)
}
// Seed rand; doing this every time is fine
rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
// Pick a random number that lies somewhere between the min and max and
// multiply by the attemptNum. attemptNum starts at zero so we always
// increment here. We first get a random percentage, then apply that to the
// difference between min and max, and add to min.
jitter := rand.Float64() * float64(max-min)
jitterMin := int64(jitter) + int64(min)
return time.Duration(jitterMin * int64(attemptNum))
}
// PassthroughErrorHandler is an ErrorHandler that directly passes through the
// values from the net/http library for the final request. The body is not
// closed.
func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) {
return resp, err
}
// Do wraps calling an HTTP method with retries.
func (c *Client) Do(req *Request) (*http.Response, error) {
if c.Logger != nil {
c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL)
}
var resp *http.Response
var err error
for i := 0; ; i++ {
var code int // HTTP response code
// Always rewind the request body when non-nil.
if req.body != nil {
body, err := req.body()
if err != nil {
return resp, err
}
if c, ok := body.(io.ReadCloser); ok {
req.Request.Body = c
} else {
req.Request.Body = ioutil.NopCloser(body)
}
}
if c.RequestLogHook != nil {
c.RequestLogHook(c.Logger, req.Request, i)
}
// Attempt the request
resp, err = c.HTTPClient.Do(req.Request)
if resp != nil {
code = resp.StatusCode
}
// Check if we should continue with retries.
checkOK, checkErr := c.CheckRetry(req.Request.Context(), resp, err)
if err != nil {
if c.Logger != nil {
c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
}
} else {
// Call this here to maintain the behavior of logging all requests,
// even if CheckRetry signals to stop.
if c.ResponseLogHook != nil {
// Call the response logger function if provided.
c.ResponseLogHook(c.Logger, resp)
}
}
// Now decide if we should continue.
if !checkOK {
if checkErr != nil {
err = checkErr
}
return resp, err
}
// We do this before drainBody because there's no need for the I/O if
// we're breaking out
remain := c.RetryMax - i
if remain <= 0 {
break
}
// We're going to retry, consume any response to reuse the connection.
if err == nil && resp != nil {
c.drainBody(resp.Body)
}
wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp)
desc := fmt.Sprintf("%s %s", req.Method, req.URL)
if code > 0 {
desc = fmt.Sprintf("%s (status: %d)", desc, code)
}
if c.Logger != nil {
c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
}
time.Sleep(wait)
}
if c.ErrorHandler != nil {
return c.ErrorHandler(resp, err, c.RetryMax+1)
}
// By default, we close the response body and return an error without
// returning the response
if resp != nil {
resp.Body.Close()
}
return nil, fmt.Errorf("%s %s giving up after %d attempts",
req.Method, req.URL, c.RetryMax+1)
}
// Try to read the response body so we can reuse this connection.
func (c *Client) drainBody(body io.ReadCloser) {
defer body.Close()
_, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit))
if err != nil {
if c.Logger != nil {
c.Logger.Printf("[ERR] error reading response body: %v", err)
}
}
}
// Get is a shortcut for doing a GET request without making a new client.
func Get(url string) (*http.Response, error) {
return defaultClient.Get(url)
}
// Get is a convenience helper for doing simple GET requests.
func (c *Client) Get(url string) (*http.Response, error) {
req, err := NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
return c.Do(req)
}
// Head is a shortcut for doing a HEAD request without making a new client.
func Head(url string) (*http.Response, error) {
return defaultClient.Head(url)
}
// Head is a convenience method for doing simple HEAD requests.
func (c *Client) Head(url string) (*http.Response, error) {
req, err := NewRequest("HEAD", url, nil)
if err != nil {
return nil, err
}
return c.Do(req)
}
// Post is a shortcut for doing a POST request without making a new client.
func Post(url, bodyType string, body interface{}) (*http.Response, error) {
return defaultClient.Post(url, bodyType, body)
}
// Post is a convenience method for doing simple POST requests.
func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) {
req, err := NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", bodyType)
return c.Do(req)
}
// PostForm is a shortcut to perform a POST with form data without creating
// a new client.
func PostForm(url string, data url.Values) (*http.Response, error) {
return defaultClient.PostForm(url, data)
}
// PostForm is a convenience method for doing simple POST operations using
// pre-filled url.Values form data.
func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) {
return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}


@@ -1,3 +0,0 @@
module github.com/hashicorp/go-retryablehttp
require github.com/hashicorp/go-cleanhttp v0.5.0


@@ -1,2 +0,0 @@
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=

vendor/github.com/koofr/go-httpclient/.gitignore generated vendored Normal file

@@ -0,0 +1,11 @@
/bin
/pkg
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# developer specific
*.sublime-workspace
*.sublime-project

vendor/github.com/koofr/go-httpclient/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Koofr
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/koofr/go-httpclient/README.md generated vendored Normal file

@@ -0,0 +1,15 @@
go-httpclient
=============
Go HTTP client.
[![GoDoc](https://godoc.org/github.com/koofr/go-httpclient?status.png)](https://godoc.org/github.com/koofr/go-httpclient)
## Install
go get github.com/koofr/go-httpclient
## Testing
go get -t
go test

vendor/github.com/koofr/go-httpclient/errors.go generated vendored Normal file

@@ -0,0 +1,38 @@
package httpclient
import (
"errors"
"fmt"
"net/http"
)
type InvalidStatusError struct {
Expected []int
Got int
Headers http.Header
Content string
}
func (e InvalidStatusError) Error() string {
return fmt.Sprintf("Invalid response status! Got %d, expected %d; headers: %s, content: %s", e.Got, e.Expected, e.Headers, e.Content)
}
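// IsInvalidStatusError unwraps err as an *InvalidStatusError, matching both
// the value and the pointer form.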
func IsInvalidStatusError(err error) (invalidStatusError *InvalidStatusError, ok bool) {
if ise, ok := err.(InvalidStatusError); ok {
return &ise, true
} else if ise, ok := err.(*InvalidStatusError); ok {
return ise, true
} else {
return nil, false
}
}
func IsInvalidStatusCode(err error, statusCode int) bool {
if ise, ok := IsInvalidStatusError(err); ok {
return ise.Got == statusCode
} else {
return false
}
}
var RateLimitTimeoutError = errors.New("HTTPClient rate limit timeout")

vendor/github.com/koofr/go-httpclient/httpclient.go generated vendored Normal file

@@ -0,0 +1,351 @@
package httpclient
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strings"
"time"
)
var XmlHeaderBytes []byte = []byte(xml.Header)
type HTTPClient struct {
BaseURL *url.URL
Headers http.Header
Client *http.Client
PostHooks map[int]func(*http.Request, *http.Response) error
rateLimited bool
rateLimitChan chan struct{}
rateLimitTimeout time.Duration
}
func New() (httpClient *HTTPClient) {
return &HTTPClient{
Client: HttpClient,
Headers: make(http.Header),
PostHooks: make(map[int]func(*http.Request, *http.Response) error),
}
}
func Insecure() (httpClient *HTTPClient) {
httpClient = New()
httpClient.Client = InsecureHttpClient
return httpClient
}
var DefaultClient = New()
func (c *HTTPClient) SetPostHook(onStatus int, hook func(*http.Request, *http.Response) error) {
c.PostHooks[onStatus] = hook
}
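// SetRateLimit allows at most limit requests in flight at once, using a
// buffered channel as a token pool. With a non-zero timeout, a request that
// cannot obtain a token in time fails with RateLimitTimeoutError; with a zero
// timeout it waits indefinitely.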
func (c *HTTPClient) SetRateLimit(limit int, timeout time.Duration) {
c.rateLimited = true
c.rateLimitChan = make(chan struct{}, limit)
for i := 0; i < limit; i++ {
c.rateLimitChan <- struct{}{}
}
c.rateLimitTimeout = timeout
}
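// buildURL joins BaseURL.Path and req.Path (collapsing a doubled slash) and
// stores the escaped result in URL.Opaque so the path is sent verbatim.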
func (c *HTTPClient) buildURL(req *RequestData) *url.URL {
bu := c.BaseURL
rpath := req.Path
if strings.HasSuffix(bu.Path, "/") && strings.HasPrefix(rpath, "/") {
rpath = rpath[1:]
}
opaque := EscapePath(bu.Path + rpath)
u := &url.URL{
Scheme: bu.Scheme,
Host: bu.Host,
Opaque: opaque,
}
if req.Params != nil {
u.RawQuery = req.Params.Encode()
}
return u
}
func (c *HTTPClient) setHeaders(req *RequestData, httpReq *http.Request) {
switch req.RespEncoding {
case EncodingJSON:
httpReq.Header.Set("Accept", "application/json")
case EncodingXML:
httpReq.Header.Set("Accept", "application/xml")
}
if c.Headers != nil {
for key, values := range c.Headers {
for _, value := range values {
httpReq.Header.Set(key, value)
}
}
}
if req.Headers != nil {
for key, values := range req.Headers {
for _, value := range values {
httpReq.Header.Set(key, value)
}
}
}
}
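// checkStatus returns an InvalidStatusError, capturing up to 10 KiB of the
// response body, when the status code is not one of ExpectedStatus (if any
// are set).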
func (c *HTTPClient) checkStatus(req *RequestData, response *http.Response) (err error) {
if req.ExpectedStatus != nil {
statusOk := false
for _, status := range req.ExpectedStatus {
if response.StatusCode == status {
statusOk = true
}
}
if !statusOk {
lr := io.LimitReader(response.Body, 10*1024)
contentBytes, _ := ioutil.ReadAll(lr)
content := string(contentBytes)
err = InvalidStatusError{
Expected: req.ExpectedStatus,
Got: response.StatusCode,
Headers: response.Header,
Content: content,
}
return err
}
}
return nil
}
func (c *HTTPClient) unmarshalResponse(req *RequestData, response *http.Response) (err error) {
var buf []byte
switch req.RespEncoding {
case EncodingJSON:
defer response.Body.Close()
if buf, err = ioutil.ReadAll(response.Body); err != nil {
return err
}
err = json.Unmarshal(buf, req.RespValue)
if err != nil {
return err
}
return nil
case EncodingXML:
defer response.Body.Close()
if buf, err = ioutil.ReadAll(response.Body); err != nil {
return err
}
err = xml.Unmarshal(buf, req.RespValue)
if err != nil {
return err
}
return nil
}
switch req.RespValue.(type) {
case *[]byte:
defer response.Body.Close()
if buf, err = ioutil.ReadAll(response.Body); err != nil {
return err
}
respVal := req.RespValue.(*[]byte)
*respVal = buf
return nil
}
if req.RespConsume {
defer response.Body.Close()
ioutil.ReadAll(response.Body)
}
return nil
}
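// marshalRequest encodes ReqValue per ReqEncoding (JSON, XML or form) into
// ReqReader and sets the Content-Type and Content-Length headers. It is a
// no-op when a ReqReader is already supplied or there is no ReqValue.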
func (c *HTTPClient) marshalRequest(req *RequestData) (err error) {
if req.ReqReader != nil || req.ReqValue == nil {
return nil
}
if req.Headers == nil {
req.Headers = make(http.Header)
}
var buf []byte
switch req.ReqEncoding {
case EncodingJSON:
buf, err = json.Marshal(req.ReqValue)
if err != nil {
return err
}
req.ReqReader = bytes.NewReader(buf)
req.Headers.Set("Content-Type", "application/json")
req.Headers.Set("Content-Length", fmt.Sprintf("%d", len(buf)))
req.ReqContentLength = int64(len(buf))
return nil
case EncodingXML:
buf, err = xml.Marshal(req.ReqValue)
if err != nil {
return err
}
buf = append(XmlHeaderBytes, buf...)
req.ReqReader = bytes.NewReader(buf)
req.Headers.Set("Content-Type", "application/xml")
req.Headers.Set("Content-Length", fmt.Sprintf("%d", len(buf)))
req.ReqContentLength = int64(len(buf))
return nil
case EncodingForm:
if data, ok := req.ReqValue.(url.Values); ok {
formStr := data.Encode()
req.ReqReader = strings.NewReader(formStr)
req.Headers.Set("Content-Type", "application/x-www-form-urlencoded")
req.Headers.Set("Content-Length", fmt.Sprintf("%d", len(formStr)))
req.ReqContentLength = int64(len(formStr))
return nil
} else {
return fmt.Errorf("HTTPClient: invalid ReqValue type %T", req.ReqValue)
}
}
return fmt.Errorf("HTTPClient: invalid ReqEncoding: %s", req.ReqEncoding)
}
func (c *HTTPClient) runPostHook(req *http.Request, response *http.Response) (err error) {
hook, ok := c.PostHooks[response.StatusCode]
if ok {
err = hook(req, response)
}
return err
}
func (c *HTTPClient) Request(req *RequestData) (response *http.Response, err error) {
err = c.marshalRequest(req)
if err != nil {
return nil, err
}
r, err := http.NewRequest(req.Method, req.FullURL, req.ReqReader)
if err != nil {
return nil, err
}
r.ContentLength = req.ReqContentLength
if req.FullURL == "" {
r.URL = c.buildURL(req)
r.Host = r.URL.Host
}
c.setHeaders(req, r)
if c.rateLimited {
if c.rateLimitTimeout > 0 {
select {
case t := <-c.rateLimitChan:
defer func() {
c.rateLimitChan <- t
}()
case <-time.After(c.rateLimitTimeout):
return nil, RateLimitTimeoutError
}
} else {
t := <-c.rateLimitChan
defer func() {
c.rateLimitChan <- t
}()
}
}
isTraceEnabled := os.Getenv("HTTPCLIENT_TRACE") != ""
if isTraceEnabled {
requestBytes, _ := httputil.DumpRequestOut(r, true)
fmt.Println(string(requestBytes))
}
if req.IgnoreRedirects {
transport := c.Client.Transport
if transport == nil {
transport = http.DefaultTransport
}
response, err = transport.RoundTrip(r)
} else {
response, err = c.Client.Do(r)
}
if isTraceEnabled {
responseBytes, _ := httputil.DumpResponse(response, true)
fmt.Println(string(responseBytes))
}
if err != nil {
return response, err
}
if err = c.runPostHook(r, response); err != nil {
return response, err
}
if err = c.checkStatus(req, response); err != nil {
defer response.Body.Close()
return response, err
}
if err = c.unmarshalResponse(req, response); err != nil {
return response, err
}
return response, nil
}

vendor/github.com/koofr/go-httpclient/requestdata.go generated vendored Normal file

@@ -0,0 +1,96 @@
package httpclient
import (
"io"
"net/http"
"net/url"
)
type Encoding string
const (
EncodingJSON = "JSON"
EncodingXML = "XML"
EncodingForm = "Form"
)
type RequestData struct {
Method string
Path string
Params url.Values
FullURL string // client.BaseURL + Path or FullURL
Headers http.Header
ReqReader io.Reader
ReqEncoding Encoding
ReqValue interface{}
ReqContentLength int64
ExpectedStatus []int
IgnoreRedirects bool
RespEncoding Encoding
RespValue interface{}
RespConsume bool
}
func (r *RequestData) CanCopy() bool {
if r.ReqReader != nil {
return false
}
return true
}
func (r *RequestData) Copy() (ok bool, nr *RequestData) {
if !r.CanCopy() {
return false, nil
}
nr = &RequestData{
Method: r.Method,
Path: r.Path,
FullURL: r.FullURL,
ReqEncoding: r.ReqEncoding,
ReqValue: r.ReqValue,
IgnoreRedirects: r.IgnoreRedirects,
RespEncoding: r.RespEncoding,
RespValue: r.RespValue,
RespConsume: r.RespConsume,
}
if r.Params != nil {
nr.Params = make(url.Values)
for k, vs := range r.Params {
nvs := make([]string, len(vs))
for i, v := range vs {
nvs[i] = v
}
nr.Params[k] = nvs
}
}
if r.Headers != nil {
nr.Headers = make(http.Header)
for k, vs := range r.Headers {
nvs := make([]string, len(vs))
for i, v := range vs {
nvs[i] = v
}
nr.Headers[k] = nvs
}
}
if r.ExpectedStatus != nil {
nr.ExpectedStatus = make([]int, len(r.ExpectedStatus))
for i, v := range r.ExpectedStatus {
nr.ExpectedStatus[i] = v
}
}
return true, nr
}


@@ -0,0 +1,62 @@
package httpclient
import (
"io"
"mime/multipart"
"net/http"
)
func (req *RequestData) UploadFile(fieldName string, fileName string, reader io.Reader) (err error) {
return req.UploadFileExtra(fieldName, fileName, reader, nil)
}
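// UploadFileExtra streams a multipart/form-data body through an io.Pipe: the
// extra fields and the single file part are written from a goroutine, so the
// upload needs no in-memory buffering of the file.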
func (req *RequestData) UploadFileExtra(fieldName string, fileName string, reader io.Reader, extra map[string]string) (err error) {
r, w := io.Pipe()
writer := multipart.NewWriter(w)
go func() {
var err error
defer func() {
if err == nil {
w.Close()
}
}()
for k, v := range extra {
err = writer.WriteField(k, v)
if err != nil {
w.CloseWithError(err)
return
}
}
part, err := writer.CreateFormFile(fieldName, fileName)
if err != nil {
w.CloseWithError(err)
return
}
defer writer.Close()
_, err = io.Copy(part, reader)
if err != nil {
w.CloseWithError(err)
return
}
}()
req.ReqReader = r
if req.Headers == nil {
req.Headers = make(http.Header)
}
req.Headers.Set("Content-Type", writer.FormDataContentType())
return
}

vendor/github.com/koofr/go-httpclient/transport.go generated vendored Normal file

@@ -0,0 +1,29 @@
package httpclient
import (
"crypto/tls"
"net/http"
)
var HttpTransport = &http.Transport{
DisableCompression: true,
Proxy: http.ProxyFromEnvironment,
}
var HttpClient = &http.Client{
Transport: HttpTransport,
}
var InsecureTlsConfig = &tls.Config{
InsecureSkipVerify: true,
}
var InsecureHttpTransport = &http.Transport{
TLSClientConfig: InsecureTlsConfig,
DisableCompression: true,
Proxy: http.ProxyFromEnvironment,
}
var InsecureHttpClient = &http.Client{
Transport: InsecureHttpTransport,
}

vendor/github.com/koofr/go-httpclient/utils.go generated vendored Normal file

@@ -0,0 +1,14 @@
package httpclient
import (
"net/url"
"strings"
)
func EscapePath(path string) string {
u := url.URL{
Path: path,
}
return strings.Replace(u.String(), "+", "%2b", -1)
}

vendor/github.com/koofr/go-koofrclient/.gitignore generated vendored Normal file

@@ -0,0 +1,11 @@
/bin
/pkg
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# developer specific
*.sublime-workspace
*.sublime-project

vendor/github.com/koofr/go-koofrclient/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Koofr
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/koofr/go-koofrclient/README.md generated vendored Normal file

@@ -0,0 +1,15 @@
go-koofrclient
===========
Go Koofr client.
[![GoDoc](https://godoc.org/github.com/koofr/go-koofrclient?status.png)](https://godoc.org/github.com/koofr/go-koofrclient)
## Install
go get github.com/koofr/go-koofrclient
## Testing
go get -t
KOOFR_APIBASE="https://app.koofr.net" KOOFR_EMAIL="email@example.com" KOOFR_PASSWORD="yourpassword" go test

vendor/github.com/koofr/go-koofrclient/api_scheme.go generated vendored Normal file

@@ -0,0 +1,217 @@
package koofrclient
import (
"path"
)
type TokenRequest struct {
Email string `json:"email"`
Password string `json:"password"`
}
type Token struct {
Token string
}
type MountType string
const (
MountDeviceType = "device"
MountExportType = "export"
MountImportType = "import"
)
type Mount struct {
Id string `json:"id"`
Name string `json:"name"`
Type MountType `json:"type"`
Origin string `json:"origin"`
SpaceTotal int64 `json:"spaceTotal"`
SpaceUsed int64 `json:"spaceUsed"`
Online bool `json:"online"`
Owner MountUser `json:"owner"`
Users []MountUser `json:"users"`
Groups []MountGroup `json:"groups"`
Version int `json:"version"`
Permissions MountPermissions `json:"permissions"`
IsPrimary bool `json:"isPrimary"`
IsShared bool `json:"isShared"`
}
type MountUser struct {
Id string `json:"id"`
Name string `json:"name"`
Email string `json:"email"`
Permissions MountPermissions `json:"permissions"`
}
type MountGroup struct {
Id string `json:"id"`
Name string `json:"name"`
Permissions MountPermissions `json:"permissions"`
}
type MountPermissions struct {
Read bool `json:"READ"`
Write bool `json:"write"`
Owner bool `json:"OWNER"`
Mount bool `json:"MOUNT"`
CreateReceiver bool `json:"CREATE_RECEIVER"`
CreateLink bool `json:"CREATE_LINK"`
CreateAction bool `json:"CREATE_ACTION"`
Comment bool `json:"COMMENT"`
}
type DeviceProvider string
const (
StorageHubProvider = "storagehub"
StorageBlobProvider = "storageblob"
)
type Device struct {
Id string `json:"id"`
ApiKey string `json:"apiKey"`
Name string `json:"name"`
Status string `json:"status"`
SpaceTotal int64 `json:"spaceTotal"`
SpaceUsed int64 `json:"spaceUsed"`
SpaceFree int64 `json:"spaceFree"`
Version int `json:"version"`
Provider struct {
Name string `json:"name"`
Data interface{} `json:"data"`
} `json:"provider"`
ReadOnly bool `json:"readonly"`
RootMountId string `json:"rootMountId"`
}
type DeviceCreate struct {
Name string `json:"name"`
ProviderName DeviceProvider `json:"providerName"`
}
type DeviceUpdate struct {
Name string `json:"name"`
}
type FolderCreate struct {
Name string `json:"name"`
}
type FileCopy struct {
ToMountId string `json:"toMountId"`
TPath string `json:"toPath"`
}
type FileMove struct {
ToMountId string `json:"toMountId"`
TPath string `json:"toPath"`
}
type FileSpan struct {
Start int64
End int64
}
type FileUpload struct {
Name string `json:"name"`
}
type PutFilter struct {
Modified *int64
Size *int64
Hash *string
IgnoreNonExisting bool
NoRename bool
ForceOverwrite bool
}
type DeleteFilter struct {
Modified *int64
Size *int64
Hash *string
IfEmpty bool
}
type FileInfo struct {
Name string `json:"name"`
Type string `json:"type"`
Modified int64 `json:"modified"`
Size int64 `json:"size"`
ContentType string `json:"contentType"`
Path string `json:"path"`
Hash string `json:"hash"`
}
type FileTree struct {
FileInfo
Children []*FileTree `json:"children"`
}
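// Flatten walks the tree breadth-first, rewriting each child's Name to its
// path joined from the root, and returns every node as a flat []FileInfo.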
func (tree *FileTree) Flatten() []FileInfo {
trees := []*FileTree{tree}
for i := 0; i < len(trees); i++ {
tree := trees[i]
for _, child := range tree.Children {
child.Name = path.Join(tree.Name, child.Name)
trees = append(trees, child)
}
}
infos := make([]FileInfo, len(trees))
for i, tree := range trees {
infos[i] = tree.FileInfo
}
return infos
}
type User struct {
Id string `json:"id"`
FirstName string `json:"firstName"`
LastName string `json:"lastName"`
Email string `json:"email"`
}
type Shared struct {
Name        string    `json:"name"`
Type        MountType `json:"type"`
Modified    int64     `json:"modified"`
Size        int64     `json:"size"`
ContentType string    `json:"contentType"`
Hash        string    `json:"hash"`
Mount       Mount     `json:"mount"`
Link        Link      `json:"link"`
Receiver    Receiver  `json:"receiver"`
}
type Link struct {
Id               string `json:"id"`
Name             string `json:"name"`
Path             string `json:"path"`
Counter          int64  `json:"counter"`
Url              string `json:"url"`
ShortUrl         string `json:"shortUrl"`
Hash             string `json:"hash"`
Host             string `json:"host"`
HasPassword      bool   `json:"hasPassword"`
Password         string `json:"password"`
ValidFrom        int64  `json:"validFrom"`
ValidTo          int64  `json:"validTo"`
PasswordRequired bool   `json:"passwordRequired"`
}
type Receiver struct {
Id          string `json:"id"`
Name        string `json:"name"`
Path        string `json:"path"`
Counter     int64  `json:"counter"`
Url         string `json:"url"`
ShortUrl    string `json:"shortUrl"`
Hash        string `json:"hash"`
Host        string `json:"host"`
HasPassword bool   `json:"hasPassword"`
Password    string `json:"password"`
ValidFrom   int64  `json:"validFrom"`
ValidTo     int64  `json:"validTo"`
Alert       bool   `json:"alert"`
}

vendor/github.com/koofr/go-koofrclient/client.go generated vendored Normal file

@@ -0,0 +1,89 @@
package koofrclient
import (
"fmt"
"net/http"
"net/url"
"github.com/koofr/go-httpclient"
)
type KoofrClient struct {
*httpclient.HTTPClient
token string
userID string
}
func NewKoofrClient(baseUrl string, disableSecurity bool) *KoofrClient {
var httpClient *httpclient.HTTPClient
if disableSecurity {
httpClient = httpclient.Insecure()
} else {
httpClient = httpclient.New()
}
apiBaseUrl, _ := url.Parse(baseUrl)
httpClient.BaseURL = apiBaseUrl
httpClient.Headers.Set("User-Agent", "go koofrclient")
return &KoofrClient{
HTTPClient: httpClient,
token: "",
userID: "",
}
}
func (c *KoofrClient) SetUserAgent(ua string) {
c.Headers.Set("User-Agent", ua)
}
func (c *KoofrClient) SetToken(token string) {
c.token = token
c.HTTPClient.Headers.Set("Authorization", fmt.Sprintf("Token token=%s", token))
}
func (c *KoofrClient) GetToken() string {
return c.token
}
func (c *KoofrClient) SetUserID(userID string) {
c.userID = userID
}
func (c *KoofrClient) GetUserID() string {
return c.userID
}
func (c *KoofrClient) Authenticate(email string, password string) (err error) {
var tokenResponse Token
tokenRequest := TokenRequest{
Email: email,
Password: password,
}
request := httpclient.RequestData{
Method: "POST",
Path: "/token",
Headers: make(http.Header),
ExpectedStatus: []int{http.StatusOK},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: tokenRequest,
RespEncoding: httpclient.EncodingJSON,
RespValue: &tokenResponse,
}
res, err := c.Request(&request)
if err != nil {
return
}
c.SetToken(tokenResponse.Token)
c.SetUserID(res.Header.Get("X-User-ID"))
return
}
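As a usage sketch (the endpoint and credentials are placeholders), the client
above combines with the mount and file methods shown in the files below:

```go
package main

import (
	"fmt"

	koofrclient "github.com/koofr/go-koofrclient"
)

func main() {
	// Build a client, log in, then list the root of every mount.
	c := koofrclient.NewKoofrClient("https://app.koofr.net", false)
	if err := c.Authenticate("email@example.com", "yourpassword"); err != nil {
		panic(err)
	}
	mounts, err := c.Mounts() // client_mount.go
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		files, err := c.FilesList(m.Id, "/") // client_files.go
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: %d entries\n", m.Name, len(files))
	}
}
```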


@@ -0,0 +1,84 @@
package koofrclient
import (
"github.com/koofr/go-httpclient"
"net/http"
)
func (c *KoofrClient) Devices() (devices []Device, err error) {
d := &struct {
Devices *[]Device
}{&devices}
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/devices",
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &d,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) DevicesCreate(name string, provider DeviceProvider) (device Device, err error) {
deviceCreate := DeviceCreate{name, provider}
request := httpclient.RequestData{
Method: "POST",
Path: "/api/v2/devices",
ExpectedStatus: []int{http.StatusCreated},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: deviceCreate,
RespEncoding: httpclient.EncodingJSON,
RespValue: &device,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) DevicesDetails(deviceId string) (device Device, err error) {
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/devices/" + deviceId,
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &device,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) DevicesUpdate(deviceId string, deviceUpdate DeviceUpdate) (err error) {
request := httpclient.RequestData{
Method: "PUT",
Path: "/api/v2/devices/" + deviceId,
ExpectedStatus: []int{http.StatusNoContent},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: deviceUpdate,
RespConsume: true,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) DevicesDelete(deviceId string) (err error) {
request := httpclient.RequestData{
Method: "DELETE",
Path: "/api/v2/devices/" + deviceId,
ExpectedStatus: []int{http.StatusNoContent},
RespConsume: true,
}
_, err = c.Request(&request)
return
}

vendor/github.com/koofr/go-koofrclient/client_files.go generated vendored Normal file

@@ -0,0 +1,294 @@
package koofrclient
import (
"fmt"
"io"
"net/http"
"net/url"
"path"
"github.com/koofr/go-httpclient"
)
var ErrCannotOverwrite = fmt.Errorf("Can not overwrite (filter constraint fails)")
var ErrCannotRemove = fmt.Errorf("Can not remove (filter constraint fails)")
func (c *KoofrClient) FilesInfo(mountId string, path string) (info FileInfo, err error) {
params := url.Values{}
params.Set("path", path)
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/mounts/" + mountId + "/files/info",
Params: params,
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &info,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) FilesList(mountId string, basePath string) (files []FileInfo, err error) {
f := &struct {
Files *[]FileInfo
}{&files}
params := url.Values{}
params.Set("path", basePath)
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/mounts/" + mountId + "/files/list",
Params: params,
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &f,
}
_, err = c.Request(&request)
if err != nil {
return
}
for i := range files {
files[i].Path = path.Join(basePath, files[i].Name)
}
return
}
func (c *KoofrClient) FilesTree(mountId string, path string) (tree FileTree, err error) {
params := url.Values{}
params.Set("path", path)
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/mounts/" + mountId + "/files/tree",
Params: params,
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &tree,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) FilesDelete(mountId string, path string) (err error) {
return c.filesDelete(mountId, path, nil)
}
func (c *KoofrClient) FilesDeleteIf(mountId string, path string, deleteFilter *DeleteFilter) (err error) {
return c.filesDelete(mountId, path, deleteFilter)
}
func (c *KoofrClient) filesDelete(mountId string, path string, deleteFilter *DeleteFilter) (err error) {
params := url.Values{}
params.Set("path", path)
if deleteFilter != nil {
if deleteFilter.Size != nil {
params.Set("removeIfSize", fmt.Sprintf("%d", *deleteFilter.Size))
}
if deleteFilter.Modified != nil {
params.Set("removeIfModified", fmt.Sprintf("%d", *deleteFilter.Modified))
}
if deleteFilter.Hash != nil {
params.Set("removeIfHash", fmt.Sprintf("%s", *deleteFilter.Hash))
}
if deleteFilter.IfEmpty {
params.Set("removeIfEmpty", "")
}
}
request := httpclient.RequestData{
Method: "DELETE",
Path: "/api/v2/mounts/" + mountId + "/files/remove",
Params: params,
ExpectedStatus: []int{http.StatusOK},
RespConsume: true,
}
_, err = c.Request(&request)
if err != nil {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusConflict {
return ErrCannotRemove
}
default:
return err
}
}
return
}
func (c *KoofrClient) FilesNewFolder(mountId string, path string, name string) (err error) {
reqData := FolderCreate{name}
params := url.Values{}
params.Set("path", path)
request := httpclient.RequestData{
Method: "POST",
Path: "/api/v2/mounts/" + mountId + "/files/folder",
Params: params,
ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: reqData,
RespConsume: true,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) FilesCopy(mountId string, path string, toMountId string, toPath string) (err error) {
reqData := FileCopy{toMountId, toPath}
params := url.Values{}
params.Set("path", path)
request := httpclient.RequestData{
Method: "PUT",
Path: "/api/v2/mounts/" + mountId + "/files/copy",
Params: params,
ExpectedStatus: []int{http.StatusOK},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: reqData,
RespConsume: true,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) FilesMove(mountId string, path string, toMountId string, toPath string) (err error) {
reqData := FileMove{toMountId, toPath}
params := url.Values{}
params.Set("path", path)
request := httpclient.RequestData{
Method: "PUT",
Path: "/api/v2/mounts/" + mountId + "/files/move",
Params: params,
ExpectedStatus: []int{http.StatusOK},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: reqData,
RespConsume: true,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) FilesGetRange(mountId string, path string, span *FileSpan) (reader io.ReadCloser, err error) {
params := url.Values{}
params.Set("path", path)
request := httpclient.RequestData{
Method: "GET",
Path: "/content/api/v2/mounts/" + mountId + "/files/get",
Params: params,
Headers: make(http.Header),
ExpectedStatus: []int{http.StatusOK, http.StatusPartialContent},
}
if span != nil {
if span.End == -1 {
request.Headers.Set("Range", fmt.Sprintf("bytes=%d-", span.Start))
} else {
request.Headers.Set("Range", fmt.Sprintf("bytes=%d-%d", span.Start, span.End))
}
}
res, err := c.Request(&request)
if err != nil {
return
}
reader = res.Body
return
}
func (c *KoofrClient) FilesGet(mountId string, path string) (reader io.ReadCloser, err error) {
return c.FilesGetRange(mountId, path, nil)
}
func (c *KoofrClient) FilesPut(mountId string, path string, name string, reader io.Reader) (newName string, err error) {
info, err := c.FilesPutOptions(mountId, path, name, reader, nil)
return info.Name, err
}
func (c *KoofrClient) FilesPutOptions(mountId string, path string, name string, reader io.Reader, putFilter *PutFilter) (fileInfo *FileInfo, err error) {
params := url.Values{}
params.Set("path", path)
params.Set("filename", name)
params.Set("info", "true")
if putFilter != nil {
if putFilter.Size != nil {
params.Set("overwriteIfSize", fmt.Sprintf("%d", *putFilter.Size))
}
if putFilter.Modified != nil {
params.Set("overwriteIfModified", fmt.Sprintf("%d", *putFilter.Modified))
}
if putFilter.Hash != nil {
params.Set("overwriteIfHash", fmt.Sprintf("%s", *putFilter.Hash))
}
if putFilter.IgnoreNonExisting {
params.Set("overwriteIgnoreNonexisting", "")
}
if putFilter.NoRename {
params.Set("autorename", "false")
}
if putFilter.ForceOverwrite {
params.Set("overwrite", "true")
}
}
request := httpclient.RequestData{
Method: "POST",
Path: "/content/api/v2/mounts/" + mountId + "/files/put",
Params: params,
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &fileInfo,
}
err = request.UploadFile("file", "dummy", reader)
if err != nil {
return
}
_, err = c.Request(&request)
if err != nil {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusConflict {
return nil, ErrCannotOverwrite
}
default:
return nil, err
}
}
return
}

vendor/github.com/koofr/go-koofrclient/client_mount.go generated vendored Normal file

@@ -0,0 +1,38 @@
package koofrclient
import (
"github.com/koofr/go-httpclient"
"net/http"
)
func (c *KoofrClient) Mounts() (mounts []Mount, err error) {
d := &struct {
Mounts *[]Mount
}{&mounts}
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/mounts",
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &d,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) MountsDetails(mountId string) (mount Mount, err error) {
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/mounts/" + mountId,
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &mount,
}
_, err = c.Request(&request)
return
}


@@ -0,0 +1,25 @@
package koofrclient
import (
"net/http"
"github.com/koofr/go-httpclient"
)
func (c *KoofrClient) Shared() (shared []Shared, err error) {
d := &struct {
Files *[]Shared
}{&shared}
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/shared",
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &d,
}
_, err = c.Request(&request)
return
}

vendor/github.com/koofr/go-koofrclient/client_user.go generated vendored Normal file

@@ -0,0 +1,20 @@
package koofrclient
import (
"github.com/koofr/go-httpclient"
"net/http"
)
func (c *KoofrClient) UserInfo() (user User, err error) {
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/user",
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &user,
}
_, err = c.Request(&request)
return
}


@@ -894,6 +894,11 @@ func (m *Mega) NewDownload(src *Node) (*Download, error) {
return nil, err
}
// DownloadResp has an embedded error in it for some reason
if res[0].Err != 0 {
return nil, parseError(res[0].Err)
}
_, err = decryptAttr(key, []byte(res[0].Attr))
if err != nil {
return nil, err

Some files were not shown because too many files have changed in this diff.