Mirror of https://github.com/rclone/rclone.git (synced 2026-01-24 05:13:23 +00:00)
Compare commits
4 Commits
fix-mount-...adb-remote
| Author | SHA1 | Date |
|---|---|---|
| | c3e1a0f368 | |
| | 2683939a4b | |
| | ed88ae878e | |
| | 2ba5c35e88 | |
109 .travis.yml
@@ -1,33 +1,27 @@
---
language: go
sudo: required
dist: trusty
os:
- linux
- linux
go:
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12rc1
- tip
go_import_path: github.com/ncw/rclone
before_install:
- git fetch --unshallow --tags
- |
  if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
    sudo modprobe fuse
    sudo chmod 666 /dev/fuse
    sudo chown root:$USER /etc/fuse.conf
  fi
  if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
    brew update
    brew tap caskroom/cask
    brew cask install osxfuse
  fi
  if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
    choco install -y winfsp zip make
    cd ../.. # fix crlf in git checkout
    mv $TRAVIS_REPO_SLUG _old
    git config --global core.autocrlf false
    git clone _old $TRAVIS_REPO_SLUG
    cd $TRAVIS_REPO_SLUG
  fi
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
install:
- make vars
- git fetch --unshallow --tags
- make vars
- make build_dep
script:
- make check
- make quicktest
- make compile_all
env:
  global:
  - GOTAGS=cmount
@@ -38,66 +32,23 @@ env:
addons:
  apt:
    packages:
    - fuse
    - libfuse-dev
    - rpm
    - pkg-config
    - fuse
    - libfuse-dev
    - rpm
    - pkg-config
cache:
  directories:
  - $HOME/.cache/go-build
matrix:
  allow_failures:
  - go: tip
  - go: tip
  include:
  - go: 1.8.x
    script:
    - make quicktest
  - go: 1.9.x
    script:
    - make quicktest
  - go: 1.10.x
    script:
    - make quicktest
  - go: 1.11.x
    script:
    - make quicktest
  - go: 1.12.x
    env:
    - GOTAGS=cmount
    script:
    - make build_dep
    - make check
    - make quicktest
    - make racequicktest
    - make compile_all
  - os: osx
    go: 1.12.x
    env:
    - GOTAGS= # cmount doesn't work on osx travis for some reason
    cache:
      directories:
      - $HOME/Library/Caches/go-build
    script:
    - make
    - make quicktest
    - make racequicktest
  # - os: windows
  #   go: 1.12.x
  #   env:
  #   - GOTAGS=cmount
  #   - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
  #   #filter_secrets: false # works around a problem with secrets under windows
  #   cache:
  #     directories:
  #     - ${LocalAppData}/go-build
  #   script:
  #   - make
  #   - make quicktest
  #   - make racequicktest
  - go: tip
    script:
    - make quicktest

  - os: osx
    go: 1.12rc1
    env: GOTAGS=""
    cache:
      directories:
      - $HOME/Library/Caches/go-build
deploy:
  provider: script
  script: make travis_beta
@@ -105,5 +56,5 @@ deploy:
  on:
    repo: ncw/rclone
    all_branches: true
    go: 1.12.x
    condition: $TRAVIS_PULL_REQUEST == false && $TRAVIS_OS_NAME != "windows"
    go: 1.12rc1
    condition: $TRAVIS_PULL_REQUEST == false
22 Makefile
@@ -17,6 +17,8 @@ ifneq ($(TAG),$(LAST_TAG))
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
# Run full tests if go >= go1.11
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
@@ -40,6 +42,7 @@ vars:
	@echo LAST_TAG="'$(LAST_TAG)'"
	@echo NEW_TAG="'$(NEW_TAG)'"
	@echo GO_VERSION="'$(GO_VERSION)'"
	@echo FULL_TESTS="'$(FULL_TESTS)'"
	@echo BETA_URL="'$(BETA_URL)'"

version:
@@ -54,22 +57,28 @@ test: rclone
# Quick test
quicktest:
	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)

racequicktest:
ifdef FULL_TESTS
	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
endif

# Do source code quality checks
check: rclone
ifdef FULL_TESTS
	@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
	@# see: https://github.com/golangci/golangci-lint/issues/204
	@echo "-- START CODE QUALITY REPORT -------------------------------"
	@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
	@golangci-lint run ./...
	@echo "-- END CODE QUALITY REPORT ---------------------------------"
else
	@echo Skipping source quality tests as version of go too old
endif

# Get the build dependencies
build_dep:
ifdef FULL_TESTS
	go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
endif

# Get the release dependencies
release_dep:
@@ -153,7 +162,11 @@ log_since_last_release:
	git log $(LAST_TAG)..

compile_all:
ifdef FULL_TESTS
	go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
else
	@echo Skipping compile all as version of go too old
endif

appveyor_upload:
	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -173,11 +186,6 @@ BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
endif
ifeq ($(TRAVIS_OS_NAME),windows)
# BUILD_FLAGS := -include "^windows/" -cgo
# 386 doesn't build yet
BUILD_FLAGS := -include "^windows/amd64" -cgo
endif

travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
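The FULL_TESTS variable above gates the race tests, linters and cross-compile on go1.11 or newer via a perl one-liner over `go version`. The same check sketched in Go (an illustration only, not part of the build):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	// Same check as the FULL_TESTS perl one-liner: enable the full
	// test suite only on go1.11 or newer.
	re := regexp.MustCompile(`go(\d+)\.(\d+)`)
	m := re.FindStringSubmatch("go version go1.12.1 linux/amd64")
	major, _ := strconv.Atoi(m[1])
	minor, _ := strconv.Atoi(m[2])
	fmt.Println(major > 1 || minor >= 11) // true
}
```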
README.md

@@ -36,7 +36,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
821 backend/adb/adb.go Normal file
@@ -0,0 +1,821 @@
package adb

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/readers"
	"github.com/pkg/errors"
	adb "github.com/thinkhy/go-adb"
	"github.com/thinkhy/go-adb/wire"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "adb",
		Description: "Android Debug Bridge",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "serial",
			Help:     "The device serial to use. Leave empty for auto selection.",
			Advanced: true,
		}, {
			Name:     "host",
			Default:  "localhost",
			Help:     "The ADB server host.",
			Advanced: true,
		}, {
			Name:     "port",
			Default:  5037,
			Help:     "The ADB server port.",
			Advanced: true,
		}, {
			Name:     "executable",
			Help:     "The ADB executable path.",
			Advanced: true,
		}, {
			Name:     "copy_links",
			Help:     "Follow symlinks and copy the pointed to item.",
			Default:  false,
			Advanced: true,
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Serial         string
	Host           string
	Port           uint16
	Executable     string
	FollowSymlinks bool `config:"copy_links"`
}
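How these options arrive at the backend, as a minimal sketch (values invented): configstruct derives config keys from the field names, so the `config:"copy_links"` tag is what routes the copy_links key into FollowSymlinks.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
)

// Options mirrors the backend struct above.
type Options struct {
	Serial         string
	Host           string
	Port           uint16
	FollowSymlinks bool `config:"copy_links"`
}

func main() {
	// configmap.Simple is rclone's map-backed config source.
	m := configmap.Simple{"host": "localhost", "port": "5037", "copy_links": "true"}
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", *opt) // {Serial: Host:localhost Port:5037 FollowSymlinks:true}
}
```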
// Fs represents an adb device
type Fs struct {
	name        string       // name of this remote
	root        string       // the path we are working on
	opt         Options      // parsed options
	features    *fs.Features // optional features
	client      *adb.Adb
	device      *execDevice
	statFunc    statFunc
	statFuncMu  sync.Mutex
	touchFunc   touchFunc
	touchFuncMu sync.Mutex
}

// Object describes an adb file
type Object struct {
	fs      *Fs    // what this object is part of
	remote  string // The remote path
	size    int64
	mode    os.FileMode
	modTime time.Time
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("ADB root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	if root == "" {
		root = "/"
	}

	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		statFunc:  (*Object).statTry,
		touchFunc: (*Object).touchTry,
	}
	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
	}).Fill(f)

	f.client, err = adb.NewWithConfig(adb.ServerConfig{
		Host:      opt.Host,
		Port:      int(opt.Port),
		PathToAdb: opt.Executable,
	})
	if err != nil {
		return nil, errors.Wrapf(err, "Could not configure ADB server")
	}
	err = f.client.StartServer()
	if err != nil {
		return nil, errors.Wrapf(err, "Could not start ADB server")
	}

	serverVersion, err := f.client.ServerVersion()
	if err != nil {
		return nil, errors.Wrapf(err, "Could not get ADB server version")
	}
	fs.Debugf(f, "ADB server version: 0x%X", serverVersion)

	serials, err := f.client.ListDeviceSerials()
	if err != nil {
		return nil, errors.Wrapf(err, "Could not get ADB devices")
	}
	descriptor := adb.AnyDevice()
	if opt.Serial != "" {
		descriptor = adb.DeviceWithSerial(opt.Serial)
	}
	if len(serials) > 1 && opt.Serial == "" {
		return nil, errors.New("Multiple ADB devices found. Use the serial config to select a specific device")
	}
	f.device = &execDevice{f.client.Device(descriptor)}

	// follow symlinks for root paths
	entry, err := f.newEntryFollowSymlinks("")
	switch err {
	case nil:
	case fs.ErrorObjectNotFound:
	default:
		return nil, err
	}
	switch entry.(type) {
	case fs.Object:
		f.root = path.Dir(f.root)
		return f, fs.ErrorIsFile
	case nil:
		return f, nil
	case fs.Directory:
		return f, nil
	default:
		return nil, errors.Errorf("Invalid root entry type %T", entry)
	}
}
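A hypothetical usage sketch (device and path invented, and it assumes a device is attached): when the configured root turns out to be a file, NewFs above returns the parent directory together with fs.ErrorIsFile, the rclone convention that enables single-file copies.

```go
package main

import (
	"fmt"

	"github.com/ncw/rclone/backend/adb"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
)

func main() {
	m := configmap.Simple{"host": "localhost", "port": "5037"}
	// Root points at a file, so NewFs reports the parent directory.
	f, err := adb.NewFs("adb", "/sdcard/DCIM/photo.jpg", m)
	if err == fs.ErrorIsFile {
		fmt.Println(f.Root()) // /sdcard/DCIM
	}
}
```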
// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
	return 1 * time.Second
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	p := path.Join(f.root, dir)
	dirEntries, err := f.device.ListDirEntries(p)
	if err != nil {
		return nil, errors.Wrap(err, "ListDirEntries")
	}

	defer fs.CheckClose(dirEntries, &err)

	found := false
	for dirEntries.Next() {
		found = true
		dirEntry := dirEntries.Entry()
		switch dirEntry.Name {
		case ".", "..":
			continue
		}
		fsEntry, err := f.entryForDirEntry(path.Join(dir, dirEntry.Name), dirEntry, f.opt.FollowSymlinks)
		if err != nil {
			fs.Errorf(p, "Listing error: %q: %v", dirEntry.Name, err)
			return nil, err
		} else if fsEntry != nil {
			entries = append(entries, fsEntry)
		} else {
			fs.Debugf(f, "Skipping DirEntry %#v", dirEntry)
		}
	}
	err = dirEntries.Err()
	if err != nil {
		return nil, errors.Wrap(err, "ListDirEntries")
	}
	if !found {
		return nil, fs.ErrorDirNotFound
	}
	return
}

func (f *Fs) entryForDirEntry(remote string, e *adb.DirEntry, followSymlinks bool) (fs.DirEntry, error) {
	o := f.newObjectWithInfo(remote, e)
	// Follow symlinks if required
	if followSymlinks && (e.Mode&os.ModeSymlink) != 0 {
		err := f.statFunc(&o)
		if err != nil {
			return nil, err
		}
	}
	if o.mode.IsDir() {
		return fs.NewDir(remote, o.modTime), nil
	}
	return &o, nil
}
func (f *Fs) newEntry(remote string) (fs.DirEntry, error) {
	return f.newEntryWithFollow(remote, f.opt.FollowSymlinks)
}
func (f *Fs) newEntryFollowSymlinks(remote string) (fs.DirEntry, error) {
	return f.newEntryWithFollow(remote, true)
}
func (f *Fs) newEntryWithFollow(remote string, followSymlinks bool) (fs.DirEntry, error) {
	entry, err := f.device.Stat(path.Join(f.root, remote))
	if err != nil {
		if adb.HasErrCode(err, adb.FileNoExistError) {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, errors.Wrapf(err, "Stat failed")
	}
	return f.entryForDirEntry(remote, entry, followSymlinks)
}

func (f *Fs) newObjectWithInfo(remote string, e *adb.DirEntry) Object {
	return Object{
		fs:      f,
		remote:  remote,
		size:    int64(e.Size),
		mode:    e.Mode,
		modTime: e.ModifiedAt,
	}
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
	entry, err := f.newEntry(remote)
	if err != nil {
		return nil, err
	}
	obj, ok := entry.(fs.Object)
	if !ok {
		return nil, fs.ErrorObjectNotFound
	}
	return obj, nil
}

// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	// Temporary Object under construction - info filled in by Update()
	o := f.newObject(remote)
	err := o.Update(in, src, options...)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// newObject makes a half completed Object
func (f *Fs) newObject(remote string) *Object {
	return &Object{
		fs:     f,
		remote: remote,
	}
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(dir string) error {
	p := path.Join(f.root, dir)
	output, code, err := f.device.execCommandWithExitCode("mkdir -p", p)
	switch err := err.(type) {
	case nil:
		return nil
	case adb.ShellExitError:
		entry, _ := f.newEntry(p)
		if _, ok := entry.(fs.Directory); ok {
			return nil
		}
		return errors.Errorf("mkdir %q failed with %d: %q", dir, code, output)
	default:
		return errors.Wrap(err, "mkdir")
	}
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
	p := path.Join(f.root, dir)
	output, code, err := f.device.execCommandWithExitCode("rmdir", p)
	switch err := err.(type) {
	case nil:
		return nil
	case adb.ShellExitError:
		return errors.Errorf("rmdir %q failed with %d: %q", dir, code, output)
	default:
		return errors.Wrap(err, "rmdir")
	}
}
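For illustration, this is the shell line Mkdir ends up sending to the device (path invented; the quoting and the `echo :$?` suffix are built by execCommandWithExitCode, defined further down):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// The argument travels as $0 so spaces and quotes in the path
	// survive; the trailing `echo :$?` smuggles the exit code back
	// over adb's exec stream, which otherwise drops it.
	cmd, arg := "mkdir -p", "/sdcard/My Photos"
	cmdLine := fmt.Sprintf("sh -c '%s \"$0\"; echo :$?' '%s'",
		cmd, strings.Replace(arg, "'", "'\\''", -1))
	fmt.Println(cmdLine)
	// sh -c 'mkdir -p "$0"; echo :$?' '/sdcard/My Photos'
}
```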
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime() time.Time {
	return o.modTime
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return o.size
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(t time.Time) error {
	return o.fs.touchFunc(o, t)
}

func (o *Object) stat() error {
	return o.statStatArg(statArgC, path.Join(o.fs.root, o.remote))
}

func (o *Object) setMetadata(entry *adb.DirEntry) {
	// Don't overwrite the values if we don't need to
	// this avoids upsetting the race detector
	if o.size != int64(entry.Size) {
		o.size = int64(entry.Size)
	}
	if !o.modTime.Equal(entry.ModifiedAt) {
		o.modTime = entry.ModifiedAt
	}
	if o.mode != entry.Mode {
		o.mode = decodeEntryMode(uint32(entry.Mode))
	}
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
	const blockSize = 1 << 12

	var offset, count int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
		case *fs.SeekOption:
			offset = x.Offset
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	if offset > o.size {
		offset = o.size
	}
	if count < 0 {
		count = o.size - offset
	} else if count+offset > o.size {
		count = o.size - offset
	}
	fs.Debugf(o, "Open: remote: %q offset: %d count: %d", o.remote, offset, count)

	if count == 0 {
		return ioutil.NopCloser(bytes.NewReader(nil)), nil
	}
	offsetBlocks, offsetRest := offset/blockSize, offset%blockSize
	countBlocks := (count-1)/blockSize + 1

	conn, err := o.fs.device.execCommand(fmt.Sprintf("sh -c 'dd \"if=$0\" bs=%d skip=%d count=%d 2>/dev/null'", blockSize, offsetBlocks, countBlocks), path.Join(o.fs.root, o.remote))
	if err != nil {
		return nil, err
	}

	return &adbReader{
		ReadCloser: readers.NewLimitedReadCloser(conn, count+offsetRest),
		skip:       offsetRest,
		expected:   count,
	}, nil
}
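The block arithmetic in Open, worked through with invented numbers: dd seeks in whole bs-sized blocks, so the stream starts at the enclosing block boundary and adbReader later drops the first offsetRest bytes.

```go
package main

import "fmt"

func main() {
	const blockSize = 1 << 12 // 4096, as in Open above
	offset, count := int64(5000), int64(10000)

	offsetBlocks, offsetRest := offset/blockSize, offset%blockSize
	countBlocks := (count-1)/blockSize + 1

	// dd skip=1 starts the stream at byte 4096; the 904 bytes before
	// the requested offset are discarded client-side by adbReader.
	fmt.Println(offsetBlocks, offsetRest, countBlocks) // 1 904 3
}
```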
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	for _, option := range options {
		if option.Mandatory() {
			fs.Logf(option, "Unsupported mandatory option: %v", option)
		}
	}
	written, err := o.writeToFile(path.Join(o.fs.root, o.remote), in, 0666, src.ModTime())
	if err != nil {
		if removeErr := o.Remove(); removeErr != nil {
			fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
		}
		return err
	}
	expected := src.Size()
	if expected == -1 {
		expected = written
	}
	for _, t := range []int64{100, 250, 500, 1000, 2500, 5000, 10000} {
		err = o.stat()
		if err != nil {
			return err
		}
		if o.size == expected {
			return nil
		}
		fs.Debugf(o, "Invalid size after update, expected: %d got: %d", expected, o.size)
		time.Sleep(time.Duration(t) * time.Millisecond)
	}
	return o.stat()
}
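Summing the poll schedule in Update shows the worst-case wait for the device to report the final size (a standalone sketch, not backend code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Same schedule as Update's poll loop: 100ms up to 10s.
	var total time.Duration
	for _, t := range []int64{100, 250, 500, 1000, 2500, 5000, 10000} {
		total += time.Duration(t) * time.Millisecond
	}
	fmt.Println(total) // 19.35s of sleeping at most, plus the stat calls
}
```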
// Remove this object
func (o *Object) Remove() error {
	p := path.Join(o.fs.root, o.remote)
	output, code, err := o.fs.device.execCommandWithExitCode("rm", p)
	switch err := err.(type) {
	case nil:
		return nil
	case adb.ShellExitError:
		return errors.Errorf("rm %q failed with %d: %q", o.remote, code, output)
	default:
		return errors.Wrap(err, "rm")
	}
}

func (o *Object) writeToFile(path string, rd io.Reader, perms os.FileMode, modeTime time.Time) (written int64, err error) {
	dst, err := o.fs.device.OpenWrite(path, perms, modeTime)
	if err != nil {
		return
	}
	defer fs.CheckClose(dst, &err)
	return io.Copy(dst, rd)
}
type statFunc func(*Object) error

func (o *Object) statTry() error {
	o.fs.statFuncMu.Lock()
	defer o.fs.statFuncMu.Unlock()

	for _, f := range []statFunc{
		(*Object).statStatL, (*Object).statRealPath, (*Object).statReadLink,
	} {
		err := f(o)
		if err != nil {
			fs.Debugf(o, "%s", err)
		} else {
			o.fs.statFunc = f
			return nil
		}
	}

	return errors.Errorf("unable to resolve link target")
}

const (
	statArgLc = "-Lc"
	statArgC  = "-c"
)

func (o *Object) statStatL() error {
	return o.statStatArg(statArgLc, path.Join(o.fs.root, o.remote))
}

func (o *Object) statStatArg(arg, path string) error {
	output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("stat %s %s", arg, "%f,%s,%Y"), path)
	output = strings.TrimSpace(output)
	switch err := err.(type) {
	case nil:
	case adb.ShellExitError:
		return errors.Errorf("stat %q failed with %d: %q", o.remote, code, output)
	default:
		return errors.Wrap(err, "stat")
	}

	parts := strings.Split(output, ",")
	if len(parts) != 3 {
		return errors.Errorf("stat %q invalid output %q", o.remote, output)
	}

	mode, err := strconv.ParseUint(parts[0], 16, 32)
	if err != nil {
		return errors.Errorf("stat %q invalid output %q", o.remote, output)
	}
	size, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return errors.Errorf("stat %q invalid output %q", o.remote, output)
	}
	modTime, err := strconv.ParseInt(parts[2], 10, 64)
	if err != nil {
		return errors.Errorf("stat %q invalid output %q", o.remote, output)
	}

	o.size = int64(size)
	o.modTime = time.Unix(modTime, 0)
	o.mode = decodeEntryMode(uint32(mode))
	return nil
}

func (o *Object) statReadLink() error {
	p := path.Join(o.fs.root, o.remote)
	output, code, err := o.fs.device.execCommandWithExitCode("readlink -f", p)
	output = strings.TrimSuffix(output, "\n")
	switch err := err.(type) {
	case nil:
	case adb.ShellExitError:
		return errors.Errorf("readlink %q failed with %d: %q", o.remote, code, output)
	default:
		return errors.Wrap(err, "readlink")
	}
	return o.statStatArg(statArgC, output)
}
func (o *Object) statRealPath() error {
	p := path.Join(o.fs.root, o.remote)
	output, code, err := o.fs.device.execCommandWithExitCode("realpath", p)
	output = strings.TrimSuffix(output, "\n")
	switch err := err.(type) {
	case nil:
	case adb.ShellExitError:
		return errors.Errorf("realpath %q failed with %d: %q", o.remote, code, output)
	default:
		return errors.Wrap(err, "realpath")
	}
	return o.statStatArg(statArgC, output)
}
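A worked example of the output statStatArg parses (sample line invented): `stat -c %f,%s,%Y` prints the raw mode in hex, the size in bytes and the mtime in Unix seconds.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

func main() {
	// e.g. a regular file with permissions 0644: 0x8000|0644 = 81a4
	output := "81a4,1024,1550000000"
	parts := strings.Split(output, ",")
	mode, _ := strconv.ParseUint(parts[0], 16, 32)
	size, _ := strconv.ParseUint(parts[1], 10, 64)
	mtime, _ := strconv.ParseInt(parts[2], 10, 64)
	fmt.Println(mode&0xf000 == 0x8000, size, time.Unix(mtime, 0).UTC())
	// true 1024 2019-02-12 19:33:20 +0000 UTC
}
```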
type touchFunc func(*Object, time.Time) error

func (o *Object) touchTry(t time.Time) error {
	o.fs.touchFuncMu.Lock()
	defer o.fs.touchFuncMu.Unlock()

	for _, f := range []touchFunc{
		(*Object).touchCmd, (*Object).touchCd,
	} {
		err := f(o, t)
		if err != nil {
			fs.Debugf(o, "%s", err)
		} else {
			o.fs.touchFunc = f
			return nil
		}
	}

	return errors.Errorf("unable to set modification time")
}

const (
	touchArgCmd = "-cmd"
	touchArgCd  = "-cd"
)

func (o *Object) touchCmd(t time.Time) error {
	return o.touchStatArg(touchArgCmd, path.Join(o.fs.root, o.remote), t)
}
func (o *Object) touchCd(t time.Time) error {
	return o.touchStatArg(touchArgCd, path.Join(o.fs.root, o.remote), t)
}

func (o *Object) touchStatArg(arg, path string, t time.Time) error {
	output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("touch %s %s", arg, t.Format(time.RFC3339Nano)), path)
	output = strings.TrimSpace(output)
	switch err := err.(type) {
	case nil:
	case adb.ShellExitError:
		return errors.Errorf("touch %q failed with %d: %q", o.remote, code, output)
	default:
		return errors.Wrap(err, "touch")
	}

	err = o.stat()
	if err != nil {
		return err
	}
	if diff, ok := checkTimeEqualWithPrecision(t, o.modTime, o.fs.Precision()); !ok {
		return errors.Errorf("touch %q to %s was ineffective: %d", o.remote, t, diff)
	}

	return nil
}

func checkTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
	dt := t0.Sub(t1)
	if dt >= precision || dt <= -precision {
		return dt, false
	}
	return dt, true
}
func decodeEntryMode(entryMode uint32) os.FileMode {
	const (
		unixIFBLK  = 0x6000
		unixIFMT   = 0xf000
		unixIFCHR  = 0x2000
		unixIFDIR  = 0x4000
		unixIFIFO  = 0x1000
		unixIFLNK  = 0xa000
		unixIFREG  = 0x8000
		unixIFSOCK = 0xc000
		unixISGID  = 0x400
		unixISUID  = 0x800
		unixISVTX  = 0x200
	)

	mode := os.FileMode(entryMode & 0777)
	switch entryMode & unixIFMT {
	case unixIFBLK:
		mode |= os.ModeDevice
	case unixIFCHR:
		mode |= os.ModeDevice | os.ModeCharDevice
	case unixIFDIR:
		mode |= os.ModeDir
	case unixIFIFO:
		mode |= os.ModeNamedPipe
	case unixIFLNK:
		mode |= os.ModeSymlink
	case unixIFREG:
		// nothing to do
	case unixIFSOCK:
		mode |= os.ModeSocket
	}
	if entryMode&unixISGID != 0 {
		mode |= os.ModeSetgid
	}
	if entryMode&unixISUID != 0 {
		mode |= os.ModeSetuid
	}
	if entryMode&unixISVTX != 0 {
		mode |= os.ModeSticky
	}
	return mode
}
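A worked example for decodeEntryMode (input invented): 0xa1ed is the symlink file type 0xa000 plus permission bits 0755, so the decoded mode is a symlink with -rwxr-xr-x permissions.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	raw := uint32(0xa1ed)          // S_IFLNK | 0755
	perm := os.FileMode(raw & 0777) // permission bits only
	isLink := raw&0xf000 == 0xa000  // file-type bits match IFLNK
	fmt.Println(perm, isLink)       // -rwxr-xr-x true
}
```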
type execDevice struct {
	*adb.Device
}

func (d *execDevice) execCommandWithExitCode(cmd string, arg string) (string, int, error) {
	cmdLine := fmt.Sprintf("sh -c '%s \"$0\"; echo :$?' '%s'", cmd, strings.Replace(arg, "'", "'\\''", -1))
	fs.Debugf("adb", "exec: %s", cmdLine)
	conn, err := d.execCommand(cmdLine)
	if err != nil {
		return "", -1, err
	}

	resp, err := conn.ReadUntilEof()
	if err != nil {
		return "", -1, errors.Wrap(err, "ExecCommand")
	}

	outStr := string(resp)
	idx := strings.LastIndexByte(outStr, ':')
	if idx == -1 {
		return outStr, -1, fmt.Errorf("adb shell aborted, cannot parse exit code")
	}
	exitCode, _ := strconv.Atoi(strings.TrimSpace(outStr[idx+1:]))
	if exitCode != 0 {
		err = adb.ShellExitError{Command: cmdLine, ExitCode: exitCode}
	}
	return outStr[:idx], exitCode, err
}
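adb's exec: transport carries raw stdout only, so the `:$?` suffix above is how the remote exit status gets back to the client. A sketch of the parsing step (sample output invented):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Device stdout followed by the smuggled exit status.
	out := "ls: /nope: No such file or directory\n:1"
	idx := strings.LastIndexByte(out, ':')
	code, _ := strconv.Atoi(strings.TrimSpace(out[idx+1:]))
	fmt.Printf("output=%q exit=%d\n", out[:idx], code)
	// output="ls: /nope: No such file or directory\n" exit=1
}
```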
func (d *execDevice) execCommand(cmd string, args ...string) (*wire.Conn, error) {
	cmd = prepareCommandLineEscaped(cmd, args...)
	conn, err := d.Dial()
	if err != nil {
		return nil, errors.Wrap(err, "ExecCommand")
	}
	defer func() {
		if err != nil && conn != nil {
			_ = conn.Close()
		}
	}()

	req := fmt.Sprintf("exec:%s", cmd)

	if err = conn.SendMessage([]byte(req)); err != nil {
		return nil, errors.Wrap(err, "ExecCommand")
	}
	if _, err = conn.ReadStatus(req); err != nil {
		return nil, errors.Wrap(err, "ExecCommand")
	}
	return conn, nil
}

func prepareCommandLineEscaped(cmd string, args ...string) string {
	for i, arg := range args {
		args[i] = fmt.Sprintf("'%s'", strings.Replace(arg, "'", "'\\''", -1))
	}

	// Prepend the command to the args array.
	if len(args) > 0 {
		cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))
	}

	return cmd
}
type adbReader struct {
	io.ReadCloser
	skip     int64
	read     int64
	expected int64
}

func (r *adbReader) Read(b []byte) (n int, err error) {
	n, err = r.ReadCloser.Read(b)
	if s := r.skip; n > 0 && s > 0 {
		_n := int64(n)
		if _n <= s {
			r.skip -= _n
			return r.Read(b)
		}
		r.skip = 0
		copy(b, b[s:n])
		n -= int(s)
	}
	r.read += int64(n)
	if err == io.EOF && r.read < r.expected {
		fs.Debugf("adb", "Read: read: %d expected: %d n: %d", r.read, r.expected, n)
		return n, io.ErrUnexpectedEOF
	}
	return n, err
}
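A usage sketch of adbReader's skip behaviour (an in-memory reader stands in for the device connection, and the type is a trimmed copy without the rclone logging): with skip=3, the first three bytes of the stream never reach the caller, which is how Open discards the partial leading block from dd.

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// Trimmed copy of adbReader above: just the skip logic.
type adbReader struct {
	io.ReadCloser
	skip     int64
	read     int64
	expected int64
}

func (r *adbReader) Read(b []byte) (n int, err error) {
	n, err = r.ReadCloser.Read(b)
	if s := r.skip; n > 0 && s > 0 {
		_n := int64(n)
		if _n <= s {
			r.skip -= _n
			return r.Read(b)
		}
		r.skip = 0
		copy(b, b[s:n])
		n -= int(s)
	}
	r.read += int64(n)
	if err == io.EOF && r.read < r.expected {
		return n, io.ErrUnexpectedEOF
	}
	return n, err
}

func main() {
	// skip=3 drops the partial leading dd block ("xxx") client-side.
	r := &adbReader{
		ReadCloser: ioutil.NopCloser(strings.NewReader("xxxhello")),
		skip:       3,
		expected:   5,
	}
	buf := make([]byte, 16)
	n, _ := r.Read(buf)
	fmt.Println(string(buf[:n])) // hello
}
```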
20 backend/adb/adb_test.go Normal file
@@ -0,0 +1,20 @@
// Test ADB filesystem interface
package adb_test

import (
	"testing"

	"github.com/ncw/rclone/backend/adb"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestAdb:/data/local/tmp",
		NilObject:  (*adb.Object)(nil),
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: "TestAdb", Key: "copy_links", Value: "true"},
		},
	})
}
backend/all/all.go

@@ -2,6 +2,7 @@ package all

import (
	// Active file systems
	_ "github.com/ncw/rclone/backend/adb"
	_ "github.com/ncw/rclone/backend/alias"
	_ "github.com/ncw/rclone/backend/amazonclouddrive"
	_ "github.com/ncw/rclone/backend/azureblob"
@@ -16,7 +17,6 @@ import (
	_ "github.com/ncw/rclone/backend/http"
	_ "github.com/ncw/rclone/backend/hubic"
	_ "github.com/ncw/rclone/backend/jottacloud"
	_ "github.com/ncw/rclone/backend/koofr"
	_ "github.com/ncw/rclone/backend/local"
	_ "github.com/ncw/rclone/backend/mega"
	_ "github.com/ncw/rclone/backend/onedrive"
backend/amazonclouddrive/amazonclouddrive.go

@@ -155,7 +155,7 @@ type Fs struct {
	noAuthClient *http.Client       // unauthenticated http client
	root         string             // the path we are working on
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *fs.Pacer          // pacer for API calls
	pacer        *pacer.Pacer       // pacer for API calls
	trueRootID   string             // ID of true root directory
	tokenRenewer *oauthutil.Renew   // renew the token on expiry
}
@@ -273,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	root: root,
	opt:  *opt,
	c:    c,
	pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
	pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
	noAuthClient: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
backend/azureblob/azureblob.go

@@ -144,7 +144,7 @@ type Fs struct {
	containerOKMu    sync.Mutex            // mutex to protect container OK
	containerOK      bool                  // true if we have created the container
	containerDeleted bool                  // true if we have deleted the container
	pacer            *fs.Pacer             // To pace and retry the API calls
	pacer            *pacer.Pacer          // To pace and retry the API calls
	uploadToken      *pacer.TokenDispenser // control concurrency
}

@@ -347,7 +347,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	opt:       *opt,
	container: container,
	root:      directory,
	pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
	uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
	client:      fshttp.NewClient(fs.Config),
}
@@ -1386,16 +1386,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	blob := o.getBlobReference()
	httpHeaders := azblob.BlobHTTPHeaders{}
	httpHeaders.ContentType = fs.MimeType(o)
	// Compute the Content-MD5 of the file, for multiparts uploads it
	// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
	// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
	// in order to validate its integrity during transport
	if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
		sourceMD5bytes, err := hex.DecodeString(sourceMD5)
		if err == nil {
			httpHeaders.ContentMD5 = sourceMD5bytes
		} else {
			fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
	// Multipart upload doesn't support MD5 checksums at put block calls, hence calculate
	// MD5 only for PutBlob requests
	if size < int64(o.fs.opt.UploadCutoff) {
		if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
			sourceMD5bytes, err := hex.DecodeString(sourceMD5)
			if err == nil {
				httpHeaders.ContentMD5 = sourceMD5bytes
			} else {
				fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
			}
		}
	}
backend/b2/b2.go

@@ -167,7 +167,7 @@ type Fs struct {
	uploadMu     sync.Mutex                  // lock for upload variable
	uploads      []*api.GetUploadURLResponse // result of get upload URL calls
	authMu       sync.Mutex                  // lock for authorizing the account
	pacer        *fs.Pacer                   // To pace and retry the API calls
	pacer        *pacer.Pacer                // To pace and retry the API calls
	bufferTokens chan []byte                 // control concurrency of multipart uploads
}

@@ -251,7 +251,13 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
			fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
		}
	}
	return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
	retryAfterDuration := time.Duration(retryAfter) * time.Second
	if f.pacer.GetSleep() < retryAfterDuration {
		fs.Debugf(f, "Setting sleep to %v after error: %v", retryAfterDuration, err)
		// We set 1/2 the value here because the pacer will double it immediately
		f.pacer.SetSleep(retryAfterDuration / 2)
	}
	return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -357,7 +363,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	bucket: bucket,
	root:   directory,
	srv:    rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
	pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
	ReadMimeType: true,
@@ -952,13 +958,6 @@ func (f *Fs) hide(Name string) error {
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		if apiErr, ok := err.(*api.Error); ok {
			if apiErr.Code == "already_hidden" {
				// sometimes eventual consistency causes this, so
				// ignore this error since it is harmless
				return nil
			}
		}
		return errors.Wrapf(err, "failed to hide %q", Name)
	}
	return nil
backend/onedrive/onedrive.go

@@ -111,7 +111,7 @@ type Fs struct {
	features     *fs.Features          // optional features
	srv          *rest.Client          // the connection to the one drive server
	dirCache     *dircache.DirCache    // Map of directory path to directory id
	pacer        *fs.Pacer             // pacer for API calls
	pacer        *pacer.Pacer          // pacer for API calls
	tokenRenewer *oauthutil.Renew      // renew the token on expiry
	uploadToken  *pacer.TokenDispenser // control concurrency
}
@@ -260,7 +260,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	root: root,
	opt:  *opt,
	srv:  rest.NewClient(oAuthClient).SetRoot(rootURL),
	pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
f.features = (&fs.Features{
4 backend/cache/cache.go vendored
@@ -1191,7 +1191,7 @@ func (f *Fs) Rmdir(dir string) error {
	}

	var queuedEntries []*Object
	err = walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
	err = walk.Walk(f.tempFs, dir, true, -1, func(path string, entries fs.DirEntries, err error) error {
		for _, o := range entries {
			if oo, ok := o.(fs.Object); ok {
				co := ObjectFromOriginal(f, oo)
@@ -1287,7 +1287,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
	}

	var queuedEntries []*Object
	err := walk.ListR(f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
	err := walk.Walk(f.tempFs, srcRemote, true, -1, func(path string, entries fs.DirEntries, err error) error {
		for _, o := range entries {
			if oo, ok := o.(fs.Object); ok {
				co := ObjectFromOriginal(f, oo)
2 backend/cache/storage_persistent.go vendored
@@ -1023,7 +1023,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
	}

	var queuedEntries []fs.Object
	err = walk.ListR(cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
	err = walk.Walk(cacheFs.tempFs, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
		for _, o := range entries {
			if oo, ok := o.(fs.Object); ok {
				queuedEntries = append(queuedEntries, oo)
backend/drive/drive.go

@@ -186,10 +186,10 @@ func init() {
	},
	Options: []fs.Option{{
		Name: config.ConfigClientID,
		Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
		Help: "Google Application Client Id\nLeave blank normally.",
	}, {
		Name: config.ConfigClientSecret,
		Help: "Google Application Client Secret\nSetting your own is recommended.",
		Help: "Google Application Client Secret\nLeave blank normally.",
	}, {
		Name: "scope",
		Help: "Scope that rclone should use when requesting access from drive.",
@@ -240,22 +240,6 @@ func init() {
		Default:  false,
		Help:     "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
		Advanced: true,
	}, {
		Name:    "skip_checksum_gphotos",
		Default: false,
		Help: `Skip MD5 checksum on Google photos and videos only.

Use this if you get checksum errors when transferring Google photos or
videos.

Setting this flag will cause Google photos and videos to return a
blank MD5 checksum.

Google photos are identified by being in the "photos" space.

Corrupted checksums are caused by Google modifying the image/video but
not updating the checksum.`,
		Advanced: true,
	}, {
		Name:    "shared_with_me",
		Default: false,
@@ -412,7 +396,6 @@ type Options struct {
	AuthOwnerOnly       bool   `config:"auth_owner_only"`
	UseTrash            bool   `config:"use_trash"`
	SkipGdocs           bool   `config:"skip_gdocs"`
	SkipChecksumGphotos bool   `config:"skip_checksum_gphotos"`
	SharedWithMe        bool   `config:"shared_with_me"`
	TrashedOnly         bool   `config:"trashed_only"`
	Extensions          string `config:"formats"`
@@ -443,7 +426,7 @@ type Fs struct {
	client           *http.Client       // authorized client
	rootFolderID     string             // the id of the root folder
	dirCache         *dircache.DirCache // Map of directory path to directory id
	pacer            *fs.Pacer          // To pace the API calls
	pacer            *pacer.Pacer       // To pace the API calls
	exportExtensions []string           // preferred extensions to download docs
	importMimeTypes  []string           // MIME types to convert to docs
	isTeamDrive      bool               // true if this is a team drive
@@ -632,9 +615,6 @@ func (f *Fs) list(dirIDs []string, title string, directoriesOnly, filesOnly, inc
	if f.opt.AuthOwnerOnly {
		fields += ",owners"
	}
	if f.opt.SkipChecksumGphotos {
		fields += ",spaces"
	}

	fields = fmt.Sprintf("files(%s),nextPageToken", fields)

@@ -696,33 +676,28 @@ func isPowerOfTwo(x int64) bool {
}

// add a charset parameter to all text/* MIME types
func fixMimeType(mimeTypeIn string) string {
	if mimeTypeIn == "" {
		return ""
	}
	mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
func fixMimeType(mimeType string) string {
	mediaType, param, err := mime.ParseMediaType(mimeType)
	if err != nil {
		return mimeTypeIn
		return mimeType
	}
	mimeTypeOut := mimeTypeIn
	if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
	if strings.HasPrefix(mimeType, "text/") && param["charset"] == "" {
		param["charset"] = "utf-8"
		mimeTypeOut = mime.FormatMediaType(mediaType, param)
		mimeType = mime.FormatMediaType(mediaType, param)
	}
	if mimeTypeOut == "" {
		panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
	}
	return mimeTypeOut
	return mimeType
}
func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
	out = make(map[string][]string, len(in))
	for k, v := range in {
func fixMimeTypeMap(m map[string][]string) map[string][]string {
	for _, v := range m {
		for i, mt := range v {
			v[i] = fixMimeType(mt)
			fixed := fixMimeType(mt)
			if fixed == "" {
				panic(errors.Errorf("unable to fix MIME type %q", mt))
			}
			v[i] = fixed
		}
		out[fixMimeType(k)] = v
	}
	return out
	return m
}
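What both versions of fixMimeType compute, shown as a standalone sketch built on Go's mime package: text/* types gain an explicit utf-8 charset and everything else passes through unchanged.

```go
package main

import (
	"fmt"
	"mime"
	"strings"
)

func fixMimeType(mimeType string) string {
	mediaType, param, err := mime.ParseMediaType(mimeType)
	if err != nil {
		return mimeType
	}
	// Only text/* types get the charset parameter added.
	if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
		param["charset"] = "utf-8"
		mimeType = mime.FormatMediaType(mediaType, param)
	}
	return mimeType
}

func main() {
	fmt.Println(fixMimeType("text/plain"))      // text/plain; charset=utf-8
	fmt.Println(fixMimeType("application/pdf")) // application/pdf
}
```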
func isInternalMimeType(mimeType string) bool {
	return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
}
@@ -814,8 +789,8 @@ func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
}

// newPacer makes a pacer configured for drive
func newPacer(opt *Options) *fs.Pacer {
	return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
func newPacer(opt *Options) *pacer.Pacer {
	return pacer.New().SetMinSleep(time.Duration(opt.PacerMinSleep)).SetBurst(opt.PacerBurst).SetPacer(pacer.GoogleDrivePacer)
}

func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
@@ -927,7 +902,6 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
	ReadMimeType:            true,
	WriteMimeType:           true,
	CanHaveEmptyDirectories: true,
	ServerSideAcrossConfigs: true,
}).Fill(f)

// Create a new authorized Drive client.
@@ -1022,15 +996,6 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {

// newRegularObject creates a fs.Object for a normal drive.File
func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
	// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
	if f.opt.SkipChecksumGphotos {
		for _, space := range info.Spaces {
			if space == "photos" {
				info.Md5Checksum = ""
				break
			}
		}
	}
	return &Object{
		baseObject: f.newBaseObject(remote, info),
		url:        fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id),
@@ -2465,10 +2430,6 @@ func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (
	return req, nil, err
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
if o.bytes == 0 {
	// Don't supply range requests for 0 length objects as they always fail
	delete(req.Header, "Range")
}
err = o.fs.pacer.Call(func() (bool, error) {
	res, err = o.fs.client.Do(req)
	if err == nil {
backend/dropbox/dropbox.go

@@ -160,7 +160,7 @@ type Fs struct {
	team           team.Client  // for the Teams API
	slashRoot      string       // root with "/" prefix, lowercase
	slashRootSlash string       // root with "/" prefix and postfix, lowercase
	pacer          *fs.Pacer    // To pace the API calls
	pacer          *pacer.Pacer // To pace the API calls
	ns             string       // The namespace we are using or "" for none
}

@@ -209,7 +209,7 @@ func shouldRetry(err error) (bool, error) {
	case auth.RateLimitAPIError:
		if e.RateLimitError.RetryAfter > 0 {
			fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
			time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
		}
		return true, err
	}
@@ -273,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	f := &Fs{
		name: name,
		opt:  *opt,
		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	}
	config := dropbox.Config{
		LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
backend/ftp/ftp.go

@@ -15,7 +15,6 @@ import (
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/ncw/rclone/lib/readers"
	"github.com/pkg/errors"
)
@@ -46,11 +45,6 @@ func init() {
	Help:       "FTP password",
	IsPassword: true,
	Required:   true,
}, {
	Name:     "concurrency",
	Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited",
	Default:  0,
	Advanced: true,
},
},
})
@@ -58,11 +52,10 @@ func init() {

// Options defines the configuration for this backend
type Options struct {
	Host        string `config:"host"`
	User        string `config:"user"`
	Pass        string `config:"pass"`
	Port        string `config:"port"`
	Concurrency int    `config:"concurrency"`
	Host string `config:"host"`
	User string `config:"user"`
	Pass string `config:"pass"`
	Port string `config:"port"`
}

// Fs represents a remote FTP server
@@ -77,7 +70,6 @@ type Fs struct {
	dialAddr string
	poolMu   sync.Mutex
	pool     []*ftp.ServerConn
	tokens   *pacer.TokenDispenser
}

// Object describes an FTP file
@@ -136,9 +128,6 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {

// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
	if f.opt.Concurrency > 0 {
		f.tokens.Get()
	}
	f.poolMu.Lock()
	if len(f.pool) > 0 {
		c = f.pool[0]
@@ -158,9 +147,6 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
// if err is not nil then it checks the connection is alive using a
// NOOP request
func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
	if f.opt.Concurrency > 0 {
		defer f.tokens.Put()
	}
	c := *pc
	*pc = nil
	if err != nil {
@@ -212,7 +198,6 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
	user:     user,
	pass:     pass,
	dialAddr: dialAddr,
	tokens:   pacer.NewTokenDispenser(opt.Concurrency),
}
f.features = (&fs.Features{
	CanHaveEmptyDirectories: true,
backend/googlecloudstorage/googlecloudstorage.go

@@ -16,7 +16,6 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 erro
*/

import (
	"context"
	"encoding/base64"
	"encoding/hex"
	"fmt"
@@ -46,8 +45,6 @@ import (
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/googleapi"

	// NOTE: This API is deprecated
	storage "google.golang.org/api/storage/v1"
)

@@ -147,22 +144,6 @@ func init() {
	Value: "publicReadWrite",
	Help:  "Project team owners get OWNER access, and all Users get WRITER access.",
}},
}, {
	Name: "bucket_policy_only",
	Help: `Access checks should use bucket-level IAM policies.

If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.

When it is set, rclone:

- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set

Docs: https://cloud.google.com/storage/docs/bucket-policy-only
`,
	Default: false,
}, {
	Name: "location",
	Help: "Location for the newly created buckets.",
@@ -260,7 +241,6 @@ type Options struct {
	ServiceAccountCredentials string `config:"service_account_credentials"`
	ObjectACL                 string `config:"object_acl"`
	BucketACL                 string `config:"bucket_acl"`
	BucketPolicyOnly          bool   `config:"bucket_policy_only"`
	Location                  string `config:"location"`
	StorageClass              string `config:"storage_class"`
}
@@ -276,7 +256,7 @@ type Fs struct {
	bucket     string       // the bucket we are working on
	bucketOKMu sync.Mutex   // mutex to protect bucket OK
	bucketOK   bool         // true if we have created the bucket
	pacer      *fs.Pacer    // To pace the API calls
	pacer      *pacer.Pacer // To pace the API calls
}

// Object describes a storage object
@@ -401,11 +381,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else {
	oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
	if err != nil {
		ctx := context.Background()
		oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
		if err != nil {
			return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
		}
		return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
	}
}

@@ -419,7 +395,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	bucket: bucket,
	root:   directory,
	opt:    *opt,
	pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
	pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
}
f.features = (&fs.Features{
	ReadMimeType: true,
@@ -733,19 +709,8 @@ func (f *Fs) Mkdir(dir string) (err error) {
	Location:     f.opt.Location,
	StorageClass: f.opt.StorageClass,
}
if f.opt.BucketPolicyOnly {
	bucket.IamConfiguration = &storage.BucketIamConfiguration{
		BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
			Enabled: true,
		},
	}
}
err = f.pacer.Call(func() (bool, error) {
	insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
	if !f.opt.BucketPolicyOnly {
		insertBucket.PredefinedAcl(f.opt.BucketACL)
	}
	_, err = insertBucket.Do()
	_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
	return shouldRetry(err)
})
if err == nil {
@@ -1011,11 +976,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
	insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
	if !o.fs.opt.BucketPolicyOnly {
		insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
	}
	newObject, err = insertObject.Do()
	newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
	return shouldRetry(err)
})
if err != nil {
@@ -6,7 +6,6 @@ package http

import (
"io"
"mime"
"net/http"
"net/url"
"path"
@@ -45,22 +44,6 @@ func init() {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /

Use this if your target website does not use / on the end of
directories.

A / on the end of a path is how rclone normally tells the difference
between files and directories. If this flag is set, then rclone will
treat all files with Content-Type: text/html as directories and read
URLs from them rather than downloading them.

Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}},
}
fs.Register(fsi)
@@ -69,7 +52,6 @@ directories.`,

// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
}

// Fs stores the interface to the remote HTTP files
@@ -288,20 +270,14 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
if err != nil {
return nil, err
}
var (
walk func(*html.Node)
seen = make(map[string]struct{})
)
var walk func(*html.Node)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
name, err := parseName(base, a.Val)
if err == nil {
if _, found := seen[name]; !found {
names = append(names, name)
seen[name] = struct{}{}
}
names = append(names, name)
}
break
}
@@ -326,16 +302,14 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
res, err := f.httpClient.Get(URL)
if err == nil {
defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
if err == nil && res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
}
defer fs.CheckClose(res.Body, &err)

contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
switch contentType {
@@ -379,16 +353,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
fs: f,
remote: remote,
}
switch err = file.stat(); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
if err = file.stat(); err != nil {
fs.Debugf(remote, "skipping because of error: %v", err)
continue
}
entries = append(entries, file)
}
}
return entries, nil
@@ -464,16 +433,6 @@ func (o *Object) stat() error {
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
if err != nil {
return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
}
if mediaType == "text/html" {
return fs.ErrorNotAFile
}
}
return nil
}
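Editor's note: the no_slash option described above hinges on the Content-Type check in stat(): with the flag set, anything served as text/html is treated as a directory listing rather than a file. A minimal standalone sketch of the same detection (the sample header value is an assumption):

	package main

	import (
		"fmt"
		"mime"
	)

	func main() {
		// Hypothetical Content-Type header from a server that doesn't
		// end directory URLs with a trailing slash.
		contentType := "text/html; charset=utf-8"

		mediaType, _, err := mime.ParseMediaType(contentType)
		if err != nil {
			panic(err)
		}
		// Mirrors the stat() logic above: text/html means "directory"
		// when no_slash is set, so the entry is listed, not downloaded.
		fmt.Println(mediaType == "text/html") // true
	}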
@@ -65,7 +65,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
return f, tidy
}

func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
func testListRoot(t *testing.T, f fs.Fs) {
entries, err := f.List("")
require.NoError(t, err)

@@ -93,29 +93,15 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {

e = entries[3]
assert.Equal(t, "two.html", e.Remote())
if noSlash {
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
} else {
assert.Equal(t, int64(41), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
assert.Equal(t, int64(7), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}

func TestListRoot(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
testListRoot(t, f, false)
}

func TestListRootNoSlash(t *testing.T) {
f, tidy := prepare(t)
f.(*Fs).opt.NoSlash = true
defer tidy()

testListRoot(t, f, true)
testListRoot(t, f)
}

func TestListSubDir(t *testing.T) {
@@ -208,7 +194,7 @@ func TestIsAFileRoot(t *testing.T) {
f, err := NewFs(remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)

testListRoot(t, f, false)
testListRoot(t, f)
}

func TestIsAFileSubDir(t *testing.T) {

@@ -1 +1 @@
<a href="two.html/file.txt">file.txt</a>
potato

@@ -190,7 +190,7 @@ type Fs struct {
endpointURL string
srv *rest.Client
apiSrv *rest.Client
pacer *fs.Pacer
pacer *pacer.Pacer
tokenRenewer *oauthutil.Renew // renew the token on expiry
}

@@ -381,9 +381,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)

// add jottacloud to the long list of sites that don't follow the oauth spec correctly
oauth2.RegisterBrokenAuthHeaderProvider("https://www.jottacloud.com/")

// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config)
@@ -406,7 +403,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
CaseInsensitive: true,

@@ -1,589 +0,0 @@
package koofr

import (
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"path"
"strings"
"time"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"

httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)

// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
},
},
})
}

// Options represent the configuration of the Koofr backend
type Options struct {
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
}

// A Fs is a representation of a remote Koofr Fs
type Fs struct {
name string
mountID string
root string
opt Options
features *fs.Features
client *koofrclient.KoofrClient
}

// An Object on the remote Koofr Fs
type Object struct {
fs *Fs
remote string
info koofrclient.FileInfo
}

func base(pth string) string {
rv := path.Base(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}

func dir(pth string) string {
rv := path.Dir(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}

// String returns a string representation of the remote Object
func (o *Object) String() string {
return o.remote
}

// Remote returns the remote path of the Object, relative to Fs root
func (o *Object) Remote() string {
return o.remote
}

// ModTime returns the modification time of the Object
func (o *Object) ModTime() time.Time {
return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
}
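Editor's note: Koofr reports Modified as milliseconds since the Unix epoch, so ModTime above splits it into whole seconds plus the leftover milliseconds converted to nanoseconds. A quick self-contained sketch of that conversion (the timestamp value is made up):

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// Hypothetical Koofr "Modified" value: milliseconds since the epoch.
		var modified int64 = 1550000000123

		// Same split as ModTime above: seconds, plus the remaining
		// milliseconds scaled up to nanoseconds.
		t := time.Unix(modified/1000, (modified%1000)*1000*1000)
		fmt.Println(t.UTC()) // 2019-02-12 19:33:20.123 +0000 UTC
	}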
// Size returns the size of the Object in bytes
func (o *Object) Size() int64 {
return o.info.Size
}

// Fs returns a reference to the Koofr Fs containing the Object
func (o *Object) Fs() fs.Info {
return o.fs
}

// Hash returns an MD5 hash of the Object
func (o *Object) Hash(typ hash.Type) (string, error) {
if typ == hash.MD5 {
return o.info.Hash, nil
}
return "", nil
}

// fullPath returns the full path of the remote Object (including Fs root)
func (o *Object) fullPath() string {
return o.fs.fullPath(o.remote)
}

// Storable returns true if the Object is storable
func (o *Object) Storable() bool {
return true
}

// SetModTime is not supported
func (o *Object) SetModTime(mtime time.Time) error {
return nil
}

// Open opens the Object for reading
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var sOff, eOff int64 = 0, -1

for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
sOff = x.Offset
case *fs.RangeOption:
sOff = x.Start
eOff = x.End
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if sOff == 0 && eOff < 0 {
return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
}
if sOff < 0 {
sOff = o.Size() - eOff
eOff = o.Size()
}
if eOff > o.Size() {
eOff = o.Size()
}
span := &koofrclient.FileSpan{
Start: sOff,
End: eOff,
}
return o.fs.client.FilesGetRange(o.fs.mountID, o.fullPath(), span)
}
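Editor's note: Open above maps rclone's generic open options onto a Koofr FileSpan; a suffix request arrives with a negative start offset, which is why the sOff < 0 branch rewrites it to "the last eOff bytes". A small sketch of that offset arithmetic in isolation (the size and offsets are made-up values):

	package main

	import "fmt"

	// resolveSpan mirrors the offset arithmetic in Open above: it turns
	// a (start, end) pair in rclone's convention into a concrete span.
	func resolveSpan(sOff, eOff, size int64) (int64, int64) {
		if sOff < 0 { // suffix request: "the last eOff bytes"
			sOff = size - eOff
			eOff = size
		}
		if eOff > size { // clamp to the object size
			eOff = size
		}
		return sOff, eOff
	}

	func main() {
		fmt.Println(resolveSpan(-1, 100, 1000)) // 900 1000
		fmt.Println(resolveSpan(0, 5000, 1000)) // 0 1000
	}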
// Update updates the Object contents
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := o.fullPath()
dirPath := dir(fullPath)
name := base(fullPath)
err := o.fs.mkdir(dirPath)
if err != nil {
return err
}
info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
if err != nil {
return err
}
o.info = *info
return nil
}

// Remove deletes the remote Object
func (o *Object) Remove() error {
return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
}

// Name returns the name of the Fs
func (f *Fs) Name() string {
return f.name
}

// Root returns the root path of the Fs
func (f *Fs) Root() string {
return f.root
}

// String returns a string representation of the Fs
func (f *Fs) String() string {
return "koofr:" + f.mountID + ":" + f.root
}

// Features returns the optional features supported by this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}

// Precision denotes that setting modification times is not supported
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}

// Hashes returns the set of hashes provided by the Fs
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}

// fullPath constructs a full, absolute path from a Fs root relative path.
func (f *Fs) fullPath(part string) string {
return path.Join("/", f.root, part)
}

// NewFs constructs a new filesystem given a root path and configuration options
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, err
}
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)
mounts, err := client.Mounts()
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
client: client,
}
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(f)
for _, m := range mounts {
if opt.MountID != "" {
if m.Id == opt.MountID {
f.mountID = m.Id
break
}
} else if m.IsPrimary {
f.mountID = m.Id
break
}
}
if f.mountID == "" {
if opt.MountID == "" {
return nil, errors.New("Failed to find primary mount")
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
} else {
err = nil
}
return f, err
}

// List returns a list of items in a directory
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return nil, translateErrorsDir(err)
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
if file.Type == "dir" {
entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
} else {
entries[i] = &Object{
fs: f,
info: file,
remote: path.Join(dir, file.Name),
}
}
}
return entries, nil
}

// NewObject creates a new remote Object for a given remote path
func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
if err != nil {
return nil, translateErrorsObject(err)
}
if info.Type == "dir" {
return nil, fs.ErrorNotAFile
}
return &Object{
fs: f,
info: info,
remote: remote,
}, nil
}

// Put updates a remote Object
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := f.fullPath(src.Remote())
dirPath := dir(fullPath)
name := base(fullPath)
err = f.mkdir(dirPath)
if err != nil {
return nil, err
}
info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
if err != nil {
return nil, translateErrorsObject(err)
}
return &Object{
fs: f,
info: *info,
remote: src.Remote(),
}, nil
}

// PutStream updates a remote Object with a stream of unknown size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}

// isBadRequest is a predicate which holds true iff the error returned was
// HTTP status 400
func isBadRequest(err error) bool {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusBadRequest {
return true
}
}
return false
}

// translateErrorsDir translates koofr errors to rclone errors (for a dir
// operation)
func translateErrorsDir(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorDirNotFound
}
}
return err
}

// translateErrorsObject translates Koofr errors to rclone errors (for an object operation)
func translateErrorsObject(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}

// mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) mkdir(fullPath string) error {
if fullPath == "/" {
return nil
}
info, err := f.client.FilesInfo(f.mountID, fullPath)
if err == nil && info.Type == "dir" {
return nil
}
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
dirs := strings.Split(fullPath, "/")
parent := "/"
for _, part := range dirs {
if part == "" {
continue
}
info, err = f.client.FilesInfo(f.mountID, path.Join(parent, part))
if err != nil || info.Type != "dir" {
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
err = f.client.FilesNewFolder(f.mountID, parent, part)
if err != nil && !isBadRequest(err) {
return err
}
}
parent = path.Join(parent, part)
}
return nil
}
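Editor's note: mkdir above creates missing ancestors by splitting the path on "/" and creating each component under the parent built so far, tolerating "already exists" (HTTP 400) replies from FilesNewFolder. The traversal itself is just this loop (a standalone sketch with the create call stubbed out; names are illustrative):

	package main

	import (
		"fmt"
		"path"
		"strings"
	)

	func main() {
		fullPath := "/a/b/c" // hypothetical target directory

		// Same walk as mkdir above: visit each non-empty component,
		// creating it under the parent accumulated so far.
		parent := "/"
		for _, part := range strings.Split(fullPath, "/") {
			if part == "" {
				continue
			}
			fmt.Printf("create %q in %q\n", part, parent) // stand-in for FilesNewFolder
			parent = path.Join(parent, part)
		}
		// Prints: create "a" in "/", create "b" in "/a", create "c" in "/a/b"
	}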
// Mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) Mkdir(dir string) error {
fullPath := f.fullPath(dir)
return f.mkdir(fullPath)
}

// Rmdir removes an (empty) directory at the given remote path
func (f *Fs) Rmdir(dir string) error {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
if len(files) > 0 {
return fs.ErrorDirectoryNotEmpty
}
err = f.client.FilesDelete(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
return nil
}

// Copy copies a remote Object to the given path
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantCopy
}
err = f.client.FilesCopy((src.(*Object)).fs.mountID,
(src.(*Object)).fs.fullPath((src.(*Object)).remote),
f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantCopy
}
return f.NewObject(remote)
}

// Move moves a remote Object to the given path
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj := src.(*Object)
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantMove
}
err = f.client.FilesMove(srcObj.fs.mountID,
srcObj.fs.fullPath(srcObj.remote), f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantMove
}
return f.NewObject(remote)
}

// DirMove moves a remote directory to the given path
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs := src.(*Fs)
srcFullPath := srcFs.fullPath(srcRemote)
dstFullPath := f.fullPath(dstRemote)
if srcFs.mountID == f.mountID && srcFullPath == dstFullPath {
return fs.ErrorDirExists
}
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return fs.ErrorCantDirMove
}
err = f.client.FilesMove(srcFs.mountID, srcFullPath, f.mountID, dstFullPath)
if err != nil {
return fs.ErrorCantDirMove
}
return nil
}

// About reports space usage (with a MB precision)
func (f *Fs) About() (*fs.Usage, error) {
mount, err := f.client.MountsDetails(f.mountID)
if err != nil {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(mount.SpaceTotal * 1024 * 1024),
Used: fs.NewUsageValue(mount.SpaceUsed * 1024 * 1024),
Trashed: nil,
Other: nil,
Free: fs.NewUsageValue((mount.SpaceTotal - mount.SpaceUsed) * 1024 * 1024),
Objects: nil,
}, nil
}

// Purge purges the complete Fs
func (f *Fs) Purge() error {
err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
return err
}

// linkCreate is a Koofr API request for creating a public link
type linkCreate struct {
Path string `json:"path"`
}

// link is a Koofr API response to creating a public link
type link struct {
ID string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
Counter int64 `json:"counter"`
URL string `json:"url"`
ShortURL string `json:"shortUrl"`
Hash string `json:"hash"`
Host string `json:"host"`
HasPassword bool `json:"hasPassword"`
Password string `json:"password"`
ValidFrom int64 `json:"validFrom"`
ValidTo int64 `json:"validTo"`
PasswordRequired bool `json:"passwordRequired"`
}

// createLink makes a Koofr API call to create a public link
func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link, error) {
linkCreate := linkCreate{
Path: path,
}
linkData := link{}

request := httpclient.RequestData{
Method: "POST",
Path: "/api/v2/mounts/" + mountID + "/links",
ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: linkCreate,
RespEncoding: httpclient.EncodingJSON,
RespValue: &linkData,
}

_, err := c.Request(&request)
if err != nil {
return nil, err
}
return &linkData, nil
}

// PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(remote string) (string, error) {
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
if err != nil {
return "", translateErrorsDir(err)
}
return linkData.ShortURL, nil
}
@@ -1,14 +0,0 @@
package koofr_test

import (
"testing"

"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestKoofr:",
})
}
@@ -98,7 +98,7 @@ type Fs struct {
opt Options // parsed config options
features *fs.Features // optional features
srv *mega.Mega // the connection to the server
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
rootNodeMu sync.Mutex // mutex for _rootNode
_rootNode *mega.Node // root node - call findRoot to use this
mkdirMu sync.Mutex // used to serialize calls to mkdir / rmdir
@@ -217,7 +217,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: srv,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
DuplicateFiles: true,

@@ -261,7 +261,7 @@ type Fs struct {
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
driveID string // ID to use for querying Microsoft Graph
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
@@ -335,13 +335,8 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {

// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
// if `relPath` == "", it reads the metadata for the item with that ID.
//
// We address items using the pattern `drives/driveID/items/itemID:/relativePath`
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(replaceReservedChars(relPath))))
opts := newOptsCall(normalizedID, "GET", ":/"+rest.URLPathEscape(replaceReservedChars(relPath)))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
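Editor's note: the comment above explains why items are addressed as drives/driveID/items/itemID:/relativePath rather than drives/driveID/root:/itemPath. A sketch of assembling such a URL (the IDs and path are placeholders, and url.PathEscape stands in for rest.URLPathEscape):

	package main

	import (
		"fmt"
		"net/url"
	)

	func main() {
		// Placeholder values; real code derives these from the Graph API.
		driveID := "b!xyz"
		itemID := "01ABCDEF"
		relPath := "file.txt"

		// Item-relative addressing, as described in the comment above:
		// this form also works for "shared with me" folders, which the
		// root-relative form does not.
		u := "/drives/" + driveID + "/items/" + itemID + ":/" + url.PathEscape(relPath)
		fmt.Println(u) // /drives/b!xyz/items/01ABCDEF:/file.txt
	}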
@@ -480,7 +475,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
driveID: opt.DriveID,
driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -708,7 +703,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
id := info.GetID()
f.dirCache.Put(remote, id)
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
d.SetItems(folder.ChildCount)
if folder != nil {
d.SetItems(folder.ChildCount)
}
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(remote, info)
@@ -822,6 +819,9 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
return err
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}

@@ -1340,12 +1340,12 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
opts = rest.Opts{
Method: "PATCH",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(leaf)),
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf),
}
} else {
opts = rest.Opts{
Method: "PATCH",
Path: "/root:/" + withTrailingColon(rest.URLPathEscape(o.srvPath())),
Path: "/root:/" + rest.URLPathEscape(o.srvPath()),
}
}
update := api.SetFileSystemInfo{
@@ -1668,21 +1668,6 @@ func getRelativePathInsideBase(base, target string) (string, bool) {
return "", false
}

// Adds a ":" at the end of `remotePath` in a proper manner.
// If `remotePath` already ends with "/", change it to ":/"
// If `remotePath` is "", return "".
// A workaround for #2720 and #3039
func withTrailingColon(remotePath string) string {
if remotePath == "" {
return ""
}

if strings.HasSuffix(remotePath, "/") {
return remotePath[:len(remotePath)-1] + ":/"
}
return remotePath + ":"
}
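Editor's note: withTrailingColon above has exactly three cases, and a few concrete inputs make the rule easier to see. A self-contained sketch duplicating the function so it can be run on its own:

	package main

	import (
		"fmt"
		"strings"
	)

	// Same three-case rule as withTrailingColon above.
	func withTrailingColon(remotePath string) string {
		if remotePath == "" {
			return ""
		}
		if strings.HasSuffix(remotePath, "/") {
			return remotePath[:len(remotePath)-1] + ":/"
		}
		return remotePath + ":"
	}

	func main() {
		fmt.Printf("%q\n", withTrailingColon(""))       // ""
		fmt.Printf("%q\n", withTrailingColon("docs/"))  // "docs:/"
		fmt.Printf("%q\n", withTrailingColon("docs/a")) // "docs/a:"
	}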
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)

@@ -65,7 +65,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
pacer *fs.Pacer // To pace and retry the API calls
pacer *pacer.Pacer // To pace and retry the API calls
session UserSessionInfo // contains the session data
dirCache *dircache.DirCache // Map of directory path to directory id
}
@@ -144,7 +144,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}

f.dirCache = dircache.New(root, "0", f)
@@ -287,6 +287,9 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
return err
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}

@@ -95,7 +95,7 @@ type Fs struct {
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
}

@@ -254,7 +254,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
CaseInsensitive: false,

@@ -782,7 +782,7 @@ type Fs struct {
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
pacer *fs.Pacer // To pace the API calls
pacer *pacer.Pacer // To pace the API calls
srv *http.Client // a plain http client
}

@@ -1055,7 +1055,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
c: c,
bucket: bucket,
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
srv: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{

@@ -427,12 +427,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}

return NewFsWithConnection(name, root, opt, sshConfig)
}

// NewFsWithConnection creates a new Fs object from the name and root and a ssh.ClientConfig. It connects to
// the host specified in the ssh.ClientConfig
func NewFsWithConnection(name string, root string, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,

@@ -2,7 +2,6 @@ package swift

import (
"net/http"
"time"

"github.com/ncw/swift"
)
@@ -66,14 +65,6 @@ func (a *auth) Token() string {
return a.parentAuth.Token()
}

// Expires returns the time the token expires if known or Zero if not.
func (a *auth) Expires() (t time.Time) {
if do, ok := a.parentAuth.(swift.Expireser); ok {
t = do.Expires()
}
return t
}

// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
if a.parentAuth == nil {
@@ -83,7 +74,4 @@ func (a *auth) CdnUrl() string { // nolint
}

// Check the interfaces are satisfied
var (
_ swift.Authenticator = (*auth)(nil)
_ swift.Expireser = (*auth)(nil)
)
var _ swift.Authenticator = (*auth)(nil)

@@ -216,7 +216,7 @@ type Fs struct {
containerOK bool // true if we have created the container
segmentsContainer string // container to store the segments (if any) in
noCheckContainer bool // don't check the container before creating it
pacer *fs.Pacer // To pace the API calls
pacer *pacer.Pacer // To pace the API calls
}

// Object describes a swift object
@@ -401,7 +401,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
segmentsContainer: container + "_segments",
root: directory,
noCheckContainer: noCheckContainer,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
}
f.features = (&fs.Features{
ReadMimeType: true,

@@ -101,7 +101,7 @@ type Fs struct {
endpoint *url.URL // URL of the host
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the one drive server
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
precision time.Duration // mod time precision
canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime
@@ -318,7 +318,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
endpoint: u,
endpointURL: u.String(),
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
precision: fs.ModTimeNotSupported,
}
f.features = (&fs.Features{
@@ -644,18 +644,10 @@ func (f *Fs) _mkdir(dirPath string) error {
Path: dirPath,
NoResponse: true,
}
err := f.pacer.Call(func() (bool, error) {
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// already exists
// owncloud returns 423/StatusLocked if the create is already in progress
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
return nil
}
}
return err
}

// mkdir makes the directory and parents using native paths
@@ -663,7 +655,12 @@ func (f *Fs) mkdir(dirPath string) error {
// defer log.Trace(dirPath, "")("")
err := f._mkdir(dirPath)
if apiErr, ok := err.(*api.Error); ok {
// parent does not exist so create it first then try again
// already exists
// owncloud returns 423/StatusLocked if the create is already in progress
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
return nil
}
// parent does not exist
if apiErr.StatusCode == http.StatusConflict {
err = f.mkParentDir(dirPath)
if err == nil {
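Editor's note: the two webdav hunks above move the "already exists" handling between _mkdir and mkdir; either way, 405, 406, and owncloud's 423 are treated as "the directory is already there", while 409 means the parent is missing and triggers mkParentDir. A compact sketch of that status triage (assuming an error type carrying the HTTP status, like api.Error above):

	package main

	import (
		"fmt"
		"net/http"
	)

	// classify mirrors the status handling in the hunks above.
	func classify(status int) string {
		switch status {
		case http.StatusMethodNotAllowed, http.StatusNotAcceptable, http.StatusLocked:
			// 405/406, plus owncloud's 423 while a create is in progress:
			// treat the directory as already existing.
			return "already exists"
		case http.StatusConflict:
			// 409: the parent is missing, so create it and retry.
			return "create parent, retry"
		default:
			return "real error"
		}
	}

	func main() {
		for _, s := range []int{405, 423, 409, 500} {
			fmt.Println(s, "->", classify(s))
		}
	}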
@@ -916,13 +913,11 @@ func (f *Fs) About() (*fs.Usage, error) {
return nil, errors.Wrap(err, "about call failed")
}
usage := &fs.Usage{}
if q.Available != 0 || q.Used != 0 {
if q.Available >= 0 && q.Used >= 0 {
usage.Total = fs.NewUsageValue(q.Available + q.Used)
}
if q.Used >= 0 {
usage.Used = fs.NewUsageValue(q.Used)
}
if q.Available >= 0 && q.Used >= 0 {
usage.Total = fs.NewUsageValue(q.Available + q.Used)
}
if q.Used >= 0 {
usage.Used = fs.NewUsageValue(q.Used)
}
return usage, nil
}
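Editor's note: the About hunk above differs only in whether an outer q.Available != 0 || q.Used != 0 guard wraps the checks; the per-field >= 0 tests matter because the code treats negative quota values as "unknown". A sketch of the same guard logic (the quota numbers are invented):

	package main

	import "fmt"

	func main() {
		// Hypothetical WebDAV quota reply; a negative value means "unknown".
		available, used := int64(-1), int64(5<<30)

		// Same per-field guards as the About hunk above: only publish
		// values the server actually reported.
		if available >= 0 && used >= 0 {
			fmt.Println("total:", available+used)
		}
		if used >= 0 {
			fmt.Println("used:", used)
		}
		// With available == -1, only "used" is printed.
	}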
@@ -93,7 +93,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the yandex server
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
diskRoot string // root path with "disk:/" container name
}

@@ -269,7 +269,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
name: name,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.setRoot(root)
f.features = (&fs.Features{

@@ -17,18 +17,14 @@ import (
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"

"github.com/ncw/rclone/lib/rest"
"golang.org/x/net/html"
"golang.org/x/sys/unix"
)

@@ -37,7 +33,6 @@ var (
install = flag.Bool("install", false, "Install the downloaded package using sudo dpkg -i.")
extract = flag.String("extract", "", "Extract the named executable from the .tar.gz and install into bindir.")
bindir = flag.String("bindir", defaultBinDir(), "Directory to install files downloaded with -extract.")
useAPI = flag.Bool("use-api", false, "Use the API for finding the release instead of scraping the page.")
// Globals
matchProject = regexp.MustCompile(`^([\w-]+)/([\w-]+)$`)
osAliases = map[string][]string{
@@ -214,57 +209,6 @@ func getAsset(project string, matchName *regexp.Regexp) (string, string) {
return "", ""
}

// Get an asset URL and name by scraping the downloads page
//
// This doesn't use the API so isn't rate limited when not using GITHUB login details
func getAssetFromReleasesPage(project string, matchName *regexp.Regexp) (assetURL string, assetName string) {
baseURL := "https://github.com/" + project + "/releases"
log.Printf("Fetching asset info for %q from %q", project, baseURL)
base, err := url.Parse(baseURL)
if err != nil {
log.Fatalf("URL Parse failed: %v", err)
}
resp, err := http.Get(baseURL)
if err != nil {
log.Fatalf("Failed to fetch release info %q: %v", baseURL, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Printf("Error: %s", readBody(resp.Body))
log.Fatalf("Bad status %d when fetching %q release info: %s", resp.StatusCode, baseURL, resp.Status)
}
doc, err := html.Parse(resp.Body)
if err != nil {
log.Fatalf("Failed to parse web page: %v", err)
}
var walk func(*html.Node)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
if name := path.Base(a.Val); matchName.MatchString(name) && isOurOsArch(name) {
if u, err := rest.URLJoin(base, a.Val); err == nil {
if assetName == "" {
assetName = name
assetURL = u.String()
}
}
}
break
}
}
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
walk(c)
}
}
walk(doc)
if assetName == "" || assetURL == "" {
log.Fatalf("Didn't find URL in page")
}
return assetURL, assetName
}

// isOurOsArch returns true if s contains our OS and our Arch
func isOurOsArch(s string) bool {
s = strings.ToLower(s)
@@ -402,12 +346,7 @@ func main() {
log.Fatalf("Invalid regexp for name %q: %v", nameRe, err)
}

var assetURL, assetName string
if *useAPI {
assetURL, assetName = getAsset(project, matchName)
} else {
assetURL, assetName = getAssetFromReleasesPage(project, matchName)
}
assetURL, assetName := getAsset(project, matchName)
fileName := filepath.Join(os.TempDir(), assetName)
getFile(assetURL, fileName)
@@ -36,7 +36,6 @@ docs = [
"http.md",
"hubic.md",
"jottacloud.md",
"koofr.md",
"mega.md",
"azureblob.md",
"onedrive.md",

@@ -341,7 +341,8 @@ func initConfig() {
configflags.SetFlags()

// Load filters
err := filterflags.Reload()
var err error
filter.Active, err = filter.NewFilter(&filterflags.Opt)
if err != nil {
log.Fatalf("Failed to load filters: %v", err)
}

@@ -7,13 +7,8 @@ import (
"github.com/spf13/cobra"
)

var (
createEmptySrcDirs = false
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy")
}

var commandDefintion = &cobra.Command{
@@ -74,7 +69,7 @@ changed recently very efficiently like this:
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.CopyDir(fdst, fsrc, createEmptySrcDirs)
return sync.CopyDir(fdst, fsrc)
}
return operations.CopyFile(fdst, fsrc, srcFileName, srcFileName)
})

@@ -48,7 +48,7 @@ destination.
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.CopyDir(fdst, fsrc, false)
return sync.CopyDir(fdst, fsrc)
}
return operations.CopyFile(fdst, fsrc, dstFileName, srcFileName)
})
@@ -45,7 +45,7 @@ __rclone_custom_func() {
else
__rclone_init_completion -n : || return
fi
if [[ $cur != *:* ]]; then
if [[ $cur =~ ^[[:alnum:]]*$ ]]; then
local remote
while IFS= read -r remote; do
[[ $remote != $cur* ]] || COMPREPLY+=("$remote")
@@ -54,10 +54,10 @@ __rclone_custom_func() {
local paths=("$cur"*)
[[ ! -f ${paths[0]} ]] || COMPREPLY+=("${paths[@]}")
fi
else
elif [[ $cur =~ ^[[:alnum:]]+: ]]; then
local path=${cur#*:}
if [[ $path == */* ]]; then
local prefix=$(eval printf '%s' "${path%/*}")
local prefix=${path%/*}
else
local prefix=
fi
@@ -66,7 +66,6 @@ __rclone_custom_func() {
local reply=${prefix:+$prefix/}$line
[[ $reply != $path* ]] || COMPREPLY+=("$reply")
done < <(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null)
[[ ! ${COMPREPLY[@]} ]] || compopt -o filenames
fi
[[ ! ${COMPREPLY[@]} ]] || compopt -o nospace
fi

@@ -10,6 +10,7 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -66,10 +67,8 @@ output:
s - size
t - modification time
h - hash
i - ID of object
o - Original ID of underlying object
i - ID of object if known
m - MimeType of object if known
e - encrypted name

So if you wanted the path, size and modification time, you would use
--format "pst", or maybe --format "tsp" to put the path last.
@@ -162,10 +161,6 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
list.SetCSV(csv)
list.SetDirSlash(dirSlash)
list.SetAbsolute(absolute)
var opt = operations.ListJSONOpt{
NoModTime: true,
Recurse: recurse,
}

for _, char := range format {
switch char {
@@ -173,38 +168,38 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
list.AddPath()
case 't':
list.AddModTime()
opt.NoModTime = false
case 's':
list.AddSize()
case 'h':
list.AddHash(hashType)
opt.ShowHash = true
case 'i':
list.AddID()
case 'm':
list.AddMimeType()
case 'e':
list.AddEncrypted()
opt.ShowEncrypted = true
case 'o':
list.AddOrigID()
opt.ShowOrigIDs = true
default:
return errors.Errorf("Unknown format character %q", char)
}
}

return operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error {
if item.IsDir {
if filesOnly {
return nil
}
} else {
if dirsOnly {
return nil
}
return walk.Walk(fsrc, "", false, operations.ConfigMaxDepth(recurse), func(path string, entries fs.DirEntries, err error) error {
if err != nil {
fs.CountError(err)
fs.Errorf(path, "error listing: %v", err)
return nil
}
for _, entry := range entries {
_, isDir := entry.(fs.Directory)
if isDir {
if filesOnly {
continue
}
} else {
if dirsOnly {
continue
}
}
_, _ = fmt.Fprintln(out, list.Format(entry))
}
_, _ = fmt.Fprintln(out, list.Format(item))
return nil
})
}
@@ -37,8 +37,6 @@ func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
a.Crtime = modTime
// FIXME include Valid so get some caching?
// FIXME fs.Debugf(d.path, "Dir.Attr %+v", a)
a.Size = 512
a.Blocks = 1
return nil
}

@@ -10,13 +10,11 @@ import (
// Globals
var (
deleteEmptySrcDirs = false
createEmptySrcDirs = false
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move")
}

var commandDefintion = &cobra.Command{
@@ -54,7 +52,7 @@ can speed transfers up greatly.
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs, createEmptySrcDirs)
return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs)
}
return operations.MoveFile(fdst, fsrc, srcFileName, srcFileName)
})

@@ -52,7 +52,7 @@ transfer.

cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.MoveDir(fdst, fsrc, false, false)
return sync.MoveDir(fdst, fsrc, false)
}
return operations.MoveFile(fdst, fsrc, dstFileName, srcFileName)
})

@@ -10,7 +10,6 @@ import (
"sort"
"strings"

runewidth "github.com/mattn/go-runewidth"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/ncdu/scan"
"github.com/ncw/rclone/fs"
@@ -123,7 +122,7 @@ func Printf(x, y int, fg, bg termbox.Attribute, format string, args ...interface
func Line(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, msg string) {
for _, c := range msg {
termbox.SetCell(x, y, c, fg, bg)
x += runewidth.RuneWidth(c)
x++
if x >= xmax {
return
}
@@ -1,184 +0,0 @@
package dlna

const connectionManagerServiceDescription = `<?xml version="1.0" encoding="UTF-8"?>
<scpd xmlns="urn:schemas-upnp-org:service-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<actionList>
<action>
<name>GetProtocolInfo</name>
<argumentList>
<argument>
<name>Source</name>
<direction>out</direction>
<relatedStateVariable>SourceProtocolInfo</relatedStateVariable>
</argument>
<argument>
<name>Sink</name>
<direction>out</direction>
<relatedStateVariable>SinkProtocolInfo</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>PrepareForConnection</name>
<argumentList>
<argument>
<name>RemoteProtocolInfo</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ProtocolInfo</relatedStateVariable>
</argument>
<argument>
<name>PeerConnectionManager</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionManager</relatedStateVariable>
</argument>
<argument>
<name>PeerConnectionID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
<argument>
<name>Direction</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_Direction</relatedStateVariable>
</argument>
<argument>
<name>ConnectionID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
<argument>
<name>AVTransportID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_AVTransportID</relatedStateVariable>
</argument>
<argument>
<name>RcsID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_RcsID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>ConnectionComplete</name>
<argumentList>
<argument>
<name>ConnectionID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>GetCurrentConnectionIDs</name>
<argumentList>
<argument>
<name>ConnectionIDs</name>
<direction>out</direction>
<relatedStateVariable>CurrentConnectionIDs</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>GetCurrentConnectionInfo</name>
<argumentList>
<argument>
<name>ConnectionID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
<argument>
<name>RcsID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_RcsID</relatedStateVariable>
</argument>
<argument>
<name>AVTransportID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_AVTransportID</relatedStateVariable>
</argument>
<argument>
<name>ProtocolInfo</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ProtocolInfo</relatedStateVariable>
</argument>
<argument>
<name>PeerConnectionManager</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionManager</relatedStateVariable>
</argument>
<argument>
<name>PeerConnectionID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
<argument>
<name>Direction</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_Direction</relatedStateVariable>
</argument>
<argument>
<name>Status</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionStatus</relatedStateVariable>
</argument>
</argumentList>
</action>
</actionList>
<serviceStateTable>
<stateVariable sendEvents="yes">
<name>SourceProtocolInfo</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="yes">
<name>SinkProtocolInfo</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="yes">
<name>CurrentConnectionIDs</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ConnectionStatus</name>
<dataType>string</dataType>
<allowedValueList>
<allowedValue>OK</allowedValue>
<allowedValue>ContentFormatMismatch</allowedValue>
<allowedValue>InsufficientBandwidth</allowedValue>
<allowedValue>UnreliableChannel</allowedValue>
<allowedValue>Unknown</allowedValue>
</allowedValueList>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ConnectionManager</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_Direction</name>
<dataType>string</dataType>
<allowedValueList>
<allowedValue>Input</allowedValue>
<allowedValue>Output</allowedValue>
</allowedValueList>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ProtocolInfo</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ConnectionID</name>
<dataType>i4</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_AVTransportID</name>
<dataType>i4</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_RcsID</name>
<dataType>i4</dataType>
</stateVariable>
</serviceStateTable>
</scpd>`
@@ -84,21 +84,6 @@ var services = []*service{
|
||||
},
|
||||
SCPD: contentDirectoryServiceDescription,
|
||||
},
|
||||
{
|
||||
Service: upnp.Service{
|
||||
ServiceType: "urn:schemas-upnp-org:service:ConnectionManager:1",
|
||||
ServiceId: "urn:upnp-org:serviceId:ConnectionManager",
|
||||
ControlURL: serviceControlURL,
|
||||
},
|
||||
SCPD: connectionManagerServiceDescription,
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
for _, s := range services {
|
||||
p := path.Join("/scpd", s.ServiceId)
|
||||
s.SCPDURL = p
|
||||
}
|
||||
}
|
||||
|
||||
func devices() []string {
|
||||
@@ -265,6 +250,9 @@ func (s *server) initMux(mux *http.ServeMux) {
|
||||
|
||||
// Install handlers to serve SCPD for each UPnP service.
|
||||
for _, s := range services {
|
||||
p := path.Join("/scpd", s.ServiceId)
|
||||
s.SCPDURL = p
|
||||
|
||||
mux.HandleFunc(s.SCPDURL, func(serviceDesc string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("content-type", `text/xml; charset="utf-8"`)
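
The hunk above cuts off mid-handler, but the shape is clear: a factory closure is invoked once per service so each route captures its own copy of the SCPD document rather than sharing the loop variable. A self-contained sketch of that pattern (names and XML payload invented, not from rclone):

```
package main

import "net/http"

func main() {
	services := map[string]string{
		"/scpd/ContentDirectory":  "<scpd>...</scpd>",
		"/scpd/ConnectionManager": "<scpd>...</scpd>",
	}
	mux := http.NewServeMux()
	for url, desc := range services {
		mux.HandleFunc(url, func(serviceDesc string) http.HandlerFunc {
			return func(w http.ResponseWriter, r *http.Request) {
				w.Header().Set("content-type", `text/xml; charset="utf-8"`)
				_, _ = w.Write([]byte(serviceDesc))
			}
		}(desc)) // evaluate desc now, not when a request arrives
	}
	_ = http.ListenAndServe("localhost:8080", mux) // illustrative address
}
```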

@@ -59,11 +59,6 @@ func TestRootSCPD(t *testing.T) {
// Make sure that the SCPD contains a CDS service.
require.Contains(t, string(body),
"<serviceType>urn:schemas-upnp-org:service:ContentDirectory:1</serviceType>")
// Make sure that the SCPD contains a CM service.
require.Contains(t, string(body),
"<serviceType>urn:schemas-upnp-org:service:ConnectionManager:1</serviceType>")
// Ensure that the SCPD url is configured.
require.Regexp(t, "<SCPDURL>/.*</SCPDURL>", string(body))
}

// Make sure that it serves content from the remote.

@@ -330,12 +330,25 @@ func (s *server) listObjects(w http.ResponseWriter, r *http.Request, remote stri
ls := listItems{}

// if remote supports ListR use that directly, otherwise use recursive Walk
err := walk.ListR(s.f, remote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, entry := range entries {
ls.add(entry)
}
return nil
})
var err error
if ListR := s.f.Features().ListR; ListR != nil {
err = ListR(remote, func(entries fs.DirEntries) error {
for _, entry := range entries {
ls.add(entry)
}
return nil
})
} else {
err = walk.Walk(s.f, remote, true, -1, func(path string, entries fs.DirEntries, err error) error {
if err == nil {
for _, entry := range entries {
ls.add(entry)
}
}
return err
})
}

if err != nil {
_, err = fserrors.Cause(err)
if err != fs.ErrorDirNotFound {
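
The change above is an instance of rclone's optional-feature probing: `Features().ListR` is non-nil only when a backend supports fast recursive listing, and callers fall back to a generic walk otherwise. A runnable toy sketch of the same detect-or-fall-back shape (all names invented):

```
package main

import "fmt"

type features struct {
	// ListR is nil when a backend has no fast recursive listing.
	ListR func(dir string) ([]string, error)
}

func listAll(feat features, dir string) ([]string, error) {
	if feat.ListR != nil {
		return feat.ListR(dir) // fast path: one call covers the whole tree
	}
	// generic fallback: stands in for a directory-by-directory walk
	return []string{dir + "/walked"}, nil
}

func main() {
	fast := features{ListR: func(dir string) ([]string, error) {
		return []string{dir + "/listed"}, nil
	}}
	out, _ := listAll(fast, "a")
	fmt.Println(out) // [a/listed]
	out, _ = listAll(features{}, "b")
	fmt.Println(out) // [b/walked]
}
```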

@@ -6,13 +6,8 @@ import (
"github.com/spf13/cobra"
)

var (
createEmptySrcDirs = false
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after sync")
}

var commandDefintion = &cobra.Command{
@@ -44,7 +39,7 @@ go there.
cmd.CheckArgs(2, 2, command, args)
fsrc, fdst := cmd.NewFsSrcDst(args)
cmd.Run(true, true, command, func() error {
return sync.Sync(fdst, fsrc, createEmptySrcDirs)
return sync.Sync(fdst, fsrc)
})
},
}

@@ -29,7 +29,6 @@ Rclone is a command line program to sync files and directories to and from:
* {{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
* {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
* {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
* {{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
* {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
* {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
* {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}

@@ -237,14 +237,3 @@ Contributors
* Jonathan <vanillajonathan@users.noreply.github.com>
* James Carpenter <orbsmiv@users.noreply.github.com>
* Vince <vince0villamora@gmail.com>
* Nestar47 <47841759+Nestar47@users.noreply.github.com>
* Six <brbsix@gmail.com>
* Alexandru Bumbacea <alexandru.bumbacea@booking.com>
* calisro <robert.calistri@gmail.com>
* Dr.Rx <david.rey@nventive.com>
* marcintustin <marcintustin@users.noreply.github.com>
* jaKa Močnik <jaka@koofr.net>
* Fionera <fionera@fionera.de>
* Dan Walters <dan@walters.io>
* Danil Semelenov <sgtpep@users.noreply.github.com>
* xopez <28950736+xopez@users.noreply.github.com>

@@ -1,7 +1,7 @@
---
title: "Documentation"
description: "Rclone Usage"
date: "2019-02-25"
date: "2015-06-06"
---

Configure
@@ -34,7 +34,6 @@ See the following for detailed instructions for
* [HTTP](/http/)
* [Hubic](/hubic/)
* [Jottacloud](/jottacloud/)
* [Koofr](/koofr/)
* [Mega](/mega/)
* [Microsoft Azure Blob Storage](/azureblob/)
* [Microsoft OneDrive](/onedrive/)
@@ -99,7 +98,7 @@ The main rclone commands with most used first
* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output shell completion scripts for rclone.
* [rclone gendocs](/commands/rclone_gendocs/) - Output markdown docs for rclone to the directory supplied.
* [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file.
* [rclone mount](/commands/rclone_mount/) - Mount the remote as a mountpoint.
* [rclone mount](/commands/rclone_mount/) - Mount the remote as a mountpoint. **EXPERIMENTAL**
* [rclone moveto](/commands/rclone_moveto/) - Move file or directory from source to dest.
* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone.conf
* [rclone cryptcheck](/commands/rclone_cryptcheck/) - Check the integrity of a crypted remote.
@@ -171,24 +170,11 @@ should be the name or prefix of a backend (the `type` in the config
file) and all the configuration for the backend should be provided on
the command line (or in environment variables).

Here are some examples:
Eg

    rclone lsd --http-url https://pub.rclone.org :http:

To list all the directories in the root of `https://pub.rclone.org/`.

    rclone lsf --http-url https://example.com :http:path/to/dir

To list files and directories in `https://example.com/path/to/dir/`

    rclone copy --http-url https://example.com :http:path/to/dir /tmp/dir

To copy files and directories in `https://example.com/path/to/dir` to `/tmp/dir`.

    rclone copy --sftp-host example.com :sftp:path/to/dir /tmp/dir

To copy files and directories from `example.com` in the relative
directory `path/to/dir` to `/tmp/dir` using sftp.
Which lists all the directories in `pub.rclone.org`.

Quoting and the shell
---------------------
@@ -821,16 +807,6 @@ then the files will have SUFFIX added on to them.

See `--backup-dir` for more info.

### --suffix-keep-extension ###

When using `--suffix`, setting this causes rclone to put the SUFFIX
before the extension of the files that it backs up rather than after.

So let's say we had `--suffix -2019-01-01`, without the flag `file.txt`
would be backed up to `file.txt-2019-01-01` and with the flag it would
be backed up to `file-2019-01-01.txt`. This can be helpful to make
sure the suffixed files can still be opened.
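
A concrete invocation pairing these flags might look like this (paths and the suffix value are illustrative):

    rclone sync /home/source remote:current --backup-dir remote:old --suffix -2019-01-01 --suffix-keep-extension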

### --syslog ###

On capable OSes (not Windows or Plan9) send all log output to syslog.

@@ -854,15 +854,6 @@ The most likely cause of this is the duplicated file issue above - run
`rclone dedupe` and check your logs for duplicate object or directory
messages.

This can also be caused by a delay/caching on google drive's end when
comparing directory listings, specifically with team drives used in
combination with --fast-list. Files that were uploaded recently may
not appear on the directory list sent to rclone when using --fast-list.

Waiting a moderate period of time between attempts (estimated to be
approximately 1 hour) and/or not using --fast-list both seem to be
effective in preventing the problem.

### Making your own client_id ###

When you use rclone with Google drive in its default configuration you

@@ -188,10 +188,3 @@ causes not all domains to be resolved properly.

Additionally with the `GODEBUG=netdns=` environment variable the Go
resolver decision can be influenced. This also makes it possible to resolve
certain issues with DNS resolution. See the [name resolution section in the go docs](https://golang.org/pkg/net/#hdr-Name_Resolution).

### The total size reported in the stats for a sync is wrong and keeps changing

It is likely you have more than 10,000 files that need to be
synced. By default rclone only gets 10,000 files ahead in a sync so as
not to use up too much memory. You can change this default with the
[--max-backlog](/docs/#max-backlog-n) flag.
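
For example, to let a very large sync queue more files ahead (the value here is illustrative):

    rclone sync --max-backlog 200000 source:path dest:path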

@@ -217,20 +217,6 @@ the rclone config file, you can set `service_account_credentials` with
the actual contents of the file instead, or set the equivalent
environment variable.

### Application Default Credentials ###

If no other source of credentials is provided, rclone will fall back
to
[Application Default Credentials](https://cloud.google.com/video-intelligence/docs/common/auth#authenticating_with_application_default_credentials).
This is useful both when you have already configured authentication
for your developer account and in production when running on a google
compute host. Note that if running in docker, you may need to run
additional commands on your google compute machine -
[see this page](https://cloud.google.com/container-registry/docs/advanced-authentication#gcloud_as_a_docker_credential_helper).

Note that when application default credentials are used, there
is no need to explicitly configure a project number.

### --fast-list ###

This remote supports `--fast-list` which allows you to use fewer
@@ -342,27 +328,6 @@ Access Control List for new buckets.
- "publicReadWrite"
    - Project team owners get OWNER access, and all Users get WRITER access.

#### --gcs-bucket-policy-only

Access checks should use bucket-level IAM policies.

If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.

When it is set, rclone:

- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set

Docs: https://cloud.google.com/storage/docs/bucket-policy-only

- Config: bucket_policy_only
- Env Var: RCLONE_GCS_BUCKET_POLICY_ONLY
- Type: bool
- Default: false

#### --gcs-location

Location for the newly created buckets.

@@ -1,189 +0,0 @@
---
title: "Koofr"
description: "Rclone docs for Koofr"
date: "2019-02-25"
---

<i class="fa fa-suitcase"></i> Koofr
-----------------------------------------

Paths are specified as `remote:path`

Paths may be as deep as required, eg `remote:directory/subdirectory`.

The initial setup for Koofr involves creating an application password for
rclone. You can do that by opening the Koofr
[web application](https://app.koofr.net/app/admin/preferences/password),
giving the password a nice name like `rclone` and clicking on generate.

Here is an example of how to make a remote called `koofr`. First run:

    rclone config

This will guide you through an interactive setup process:

```
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> koofr
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
 1 / A stackable unification remote, which can appear to merge the contents of several remotes
   \ "union"
 2 / Alias for a existing remote
   \ "alias"
 3 / Amazon Drive
   \ "amazon cloud drive"
 4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)
   \ "s3"
 5 / Backblaze B2
   \ "b2"
 6 / Box
   \ "box"
 7 / Cache a remote
   \ "cache"
 8 / Dropbox
   \ "dropbox"
 9 / Encrypt/Decrypt a remote
   \ "crypt"
10 / FTP Connection
   \ "ftp"
11 / Google Cloud Storage (this is not Google Drive)
   \ "google cloud storage"
12 / Google Drive
   \ "drive"
13 / Hubic
   \ "hubic"
14 / JottaCloud
   \ "jottacloud"
15 / Koofr
   \ "koofr"
16 / Local Disk
   \ "local"
17 / Mega
   \ "mega"
18 / Microsoft Azure Blob Storage
   \ "azureblob"
19 / Microsoft OneDrive
   \ "onedrive"
20 / OpenDrive
   \ "opendrive"
21 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
   \ "swift"
22 / Pcloud
   \ "pcloud"
23 / QingCloud Object Storage
   \ "qingstor"
24 / SSH/SFTP Connection
   \ "sftp"
25 / Webdav
   \ "webdav"
26 / Yandex Disk
   \ "yandex"
27 / http Connection
   \ "http"
Storage> koofr
** See help for koofr backend at: https://rclone.org/koofr/ **

Your Koofr user name
Enter a string value. Press Enter for the default ("").
user> USER@NAME
Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)
y) Yes type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:
Edit advanced config? (y/n)
y) Yes
n) No
y/n> n
Remote config
--------------------
[koofr]
type = koofr
baseurl = https://app.koofr.net
user = USER@NAME
password = *** ENCRYPTED ***
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```

You can choose to edit advanced config in order to enter your own service URL
if you use an on-premise or white label Koofr instance, or choose an alternative
mount instead of your primary storage.

Once configured you can then use `rclone` like this,

List directories in top level of your Koofr

    rclone lsd koofr:

List all the files in your Koofr

    rclone ls koofr:

To copy a local directory to a Koofr directory called backup

    rclone copy /home/source koofr:backup

<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/koofr/koofr.go then run make backenddocs -->
### Standard Options

Here are the standard options specific to koofr (Koofr).

#### --koofr-user

Your Koofr user name

- Config: user
- Env Var: RCLONE_KOOFR_USER
- Type: string
- Default: ""

#### --koofr-password

Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)

- Config: password
- Env Var: RCLONE_KOOFR_PASSWORD
- Type: string
- Default: ""

### Advanced Options

Here are the advanced options specific to koofr (Koofr).

#### --koofr-baseurl

Base URL of the Koofr API to connect to

- Config: baseurl
- Env Var: RCLONE_KOOFR_BASEURL
- Type: string
- Default: "https://app.koofr.net"

#### --koofr-mountid

Mount ID of the mount to use. If omitted, the primary mount is used.

- Config: mountid
- Env Var: RCLONE_KOOFR_MOUNTID
- Type: string
- Default: ""

<!--- autogenerated options stop -->

### Limitations ###

Note that Koofr is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".

@@ -298,13 +298,4 @@ Description: Using application 'rclone' is currently not supported for your orga

This means that rclone can't use the OneDrive for Business API with your account. You can't do much about it, maybe write an email to your admins.

However, there are other ways to interact with your OneDrive account. Have a look at the webdav backend: https://rclone.org/webdav/#sharepoint

```
Error: invalid_grant
Code: AADSTS50076
Description: Due to a configuration change made by your administrator, or because you moved to a new location, you must use multi-factor authentication to access '...'.
```

If you see the error above after enabling multi-factor authentication for your account, you can fix it by refreshing your OAuth refresh token. To do that, run `rclone config`, and choose to edit your OneDrive backend. Then, you don't need to actually make any changes until you reach this question: `Already have a token - refresh?`. For this question, answer `y` and go through the process to refresh your token, just like the first time the backend is configured. After this, rclone should work again for this backend.
However, there are other ways to interact with your OneDrive account. Have a look at the webdav backend: https://rclone.org/webdav/#sharepoint
@@ -2,7 +2,7 @@
title: "Overview of cloud storage systems"
description: "Overview of cloud storage systems"
type: page
date: "2019-02-25"
date: "2015-09-06"
---

# Overview of cloud storage systems #
@@ -28,7 +28,6 @@ Here is an overview of the major features of each cloud storage system.
| HTTP | - | No | No | No | R |
| Hubic | MD5 | Yes | No | No | R/W |
| Jottacloud | MD5 | Yes | Yes | No | R/W |
| Koofr | MD5 | No | Yes | No | - |
| Mega | - | No | No | Yes | - |
| Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W |
| Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R |

@@ -556,21 +556,6 @@ This takes the following parameters

Authentication is required for this call.

### operations/publiclink: Create or retrieve a public link to the given file or folder.

This takes the following parameters

- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"

Returns

- url - URL of the resource

See the [link command](/commands/rclone_link/) for more information on the above.

Authentication is required for this call.
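
As a sketch, this method could be invoked through the remote control command line like so (remote name and path are illustrative); the response is a JSON object carrying the `url` field described above:

    rclone rc operations/publiclink fs=drive: remote=dir/file.txt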

### operations/purge: Remove a directory or container and all of its contents

This takes the following parameters

@@ -1112,11 +1112,6 @@ server_side_encryption =
storage_class =
```

If you are using an older version of CEPH, eg 10.2.x Jewel, then you
may need to supply the parameter `--s3-upload-cutoff 0` or put this in
the config file as `upload_cutoff 0` to work around a bug which causes
uploading of small files to fail.

Note also that Ceph sometimes puts `/` in the passwords it gives
users. If you read the secret access key using the command line tools
you will get a JSON blob with the `/` escaped as `\/`. Make sure you

@@ -2,7 +2,7 @@
<div class="row">
<hr>
<div class="col-sm-12">
<p>© <a href="https://www.craig-wood.com/nick/">Nick Craig-Wood</a> 2014-2019<br>
<p>© <a href="https://www.craig-wood.com/nick/">Nick Craig-Wood</a> 2014-2017<br>
Website hosted on a <a href="https://www.memset.com/dedicated-servers/vps/"><span style="font-weight: bold; font-family: arial black, arial, sans-serif; font-style: italic;">MEMSET CLOUD VPS</span></a>,
uploaded with <a href="https://rclone.org">rclone</a>
and built with <a href="https://github.com/spf13/hugo">Hugo</a></p>

@@ -67,7 +67,6 @@
<li><a href="/http/"><i class="fa fa-globe"></i> HTTP</a></li>
<li><a href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a></li>
<li><a href="/jottacloud/"><i class="fa fa-cloud"></i> Jottacloud</a></li>
<li><a href="/koofr/"><i class="fa fa-suitcase"></i> Koofr</a></li>
<li><a href="/mega/"><i class="fa fa-archive"></i> Mega</a></li>
<li><a href="/azureblob/"><i class="fa fa-windows"></i> Microsoft Azure Blob Storage</a></li>
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft OneDrive</a></li>

@@ -67,7 +67,6 @@ type ConfigInfo struct {
DataRateUnit string
BackupDir string
Suffix string
SuffixKeepExtension bool
UseListR bool
BufferSize SizeSuffix
BwLimit BwTimetable

@@ -68,7 +68,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix for use with --backup-dir.")
flags.BoolVarP(flagSet, &fs.Config.SuffixKeepExtension, "suffix-keep-extension", "", fs.Config.SuffixKeepExtension, "Preserve the extension when using --suffix.")
flags.BoolVarP(flagSet, &fs.Config.UseListR, "fast-list", "", fs.Config.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.")
flags.Float64VarP(flagSet, &fs.Config.TPSLimit, "tpslimit", "", fs.Config.TPSLimit, "Limit HTTP transactions per second to this.")
flags.IntVarP(flagSet, &fs.Config.TPSLimitBurst, "tpslimit-burst", "", fs.Config.TPSLimitBurst, "Max burst of transactions for --tpslimit.")

@@ -21,9 +21,8 @@ var Active = mustNewFilter(nil)

// rule is one filter rule
type rule struct {
Include bool
Regexp *regexp.Regexp
boundedRecursion bool
Include bool
Regexp *regexp.Regexp
}

// Match returns true if rule matches path
@@ -47,14 +46,13 @@ type rules struct {
}

// add adds a rule if it doesn't exist already
func (rs *rules) add(Include bool, re *regexp.Regexp, boundedRecursion bool) {
func (rs *rules) add(Include bool, re *regexp.Regexp) {
if rs.existing == nil {
rs.existing = make(map[string]struct{})
}
newRule := rule{
Include: Include,
Regexp: re,
boundedRecursion: boundedRecursion,
Include: Include,
Regexp: re,
}
newRuleString := newRule.String()
if _, ok := rs.existing[newRuleString]; ok {
@@ -75,23 +73,6 @@ func (rs *rules) len() int {
return len(rs.rules)
}

// boundedRecursion returns true if the set of filters would only
// need bounded recursion to evaluate
func (rs *rules) boundedRecursion() bool {
var (
excludeAll = false
boundedRecursion = true
)
for _, rule := range rs.rules {
if rule.Include {
boundedRecursion = boundedRecursion && rule.boundedRecursion
} else if rule.Regexp.String() == `^.*$` {
excludeAll = true
}
}
return excludeAll && boundedRecursion
}

// FilesMap describes the map of files to transfer
type FilesMap map[string]struct{}

@@ -251,8 +232,7 @@ func (f *Filter) addDirGlobs(Include bool, glob string) error {
if err != nil {
return err
}
boundedRecursion := globBoundedRecursion(dirGlob)
f.dirRules.add(Include, dirRe, boundedRecursion)
f.dirRules.add(Include, dirRe)
}
return nil
}
@@ -268,9 +248,8 @@ func (f *Filter) Add(Include bool, glob string) error {
if err != nil {
return err
}
boundedRecursion := globBoundedRecursion(glob)
if isFileRule {
f.fileRules.add(Include, re, boundedRecursion)
f.fileRules.add(Include, re)
// If include rule work out what directories are needed to scan
// if exclude rule, we can't rule anything out
// Unless it is `*` which matches everything
@@ -283,7 +262,7 @@ func (f *Filter) Add(Include bool, glob string) error {
}
}
if isDirRule {
f.dirRules.add(Include, re, boundedRecursion)
f.dirRules.add(Include, re)
}
return nil
}
@@ -364,12 +343,6 @@ func (f *Filter) InActive() bool {
len(f.Opt.ExcludeFile) == 0)
}

// BoundedRecursion returns true if the filter can be evaluated with
// bounded recursion only.
func (f *Filter) BoundedRecursion() bool {
return f.fileRules.boundedRecursion()
}

// includeRemote returns whether this remote passes the filter rules.
func (f *Filter) includeRemote(remote string) bool {
for _, rule := range f.fileRules.rules {

@@ -25,7 +25,6 @@ func TestNewFilterDefault(t *testing.T) {
assert.Len(t, f.dirRules.rules, 0)
assert.Nil(t, f.files)
assert.True(t, f.InActive())
assert.False(t, f.BoundedRecursion())
}

// testFile creates a temp file with the contents
@@ -104,38 +103,6 @@ func TestNewFilterFull(t *testing.T) {
}
}
assert.False(t, f.InActive())
assert.False(t, f.BoundedRecursion())
}

func TestFilterBoundedRecursion(t *testing.T) {
for _, test := range []struct {
in string
want bool
}{
{"", false},
{"- /**", true},
{"+ *.jpg", false},
{"+ *.jpg\n- /**", false},
{"+ /*.jpg\n- /**", true},
{"+ *.png\n+ /*.jpg\n- /**", false},
{"+ /*.png\n+ /*.jpg\n- /**", true},
{"- *.jpg\n- /**", true},
{"+ /*.jpg\n- /**", true},
{"+ /*dir/\n- /**", true},
{"+ /*dir/\n", false},
{"+ /*dir/**\n- /**", false},
{"+ **/pics*/*.jpg\n- /**", false},
} {
f, err := NewFilter(nil)
require.NoError(t, err)
for _, rule := range strings.Split(test.in, "\n") {
if rule != "" {
require.NoError(t, f.AddRule(rule))
}
}
got := f.BoundedRecursion()
assert.Equal(t, test.want, got, test.in)
}
}

type includeTest struct {
@@ -184,7 +151,6 @@ func TestNewFilterIncludeFiles(t *testing.T) {
{"file3.jpg", 3, 0, false},
})
assert.False(t, f.InActive())
assert.False(t, f.BoundedRecursion())
}

func TestNewFilterIncludeFilesDirs(t *testing.T) {
@@ -312,7 +278,6 @@ func TestNewFilterMinSize(t *testing.T) {
{"potato/file2.jpg", 99, 0, false},
})
assert.False(t, f.InActive())
assert.False(t, f.BoundedRecursion())
}

func TestNewFilterMaxSize(t *testing.T) {

@@ -13,15 +13,9 @@ var (
Opt = filter.DefaultOpt
)

// Reload the filters from the flags
func Reload() (err error) {
filter.Active, err = filter.NewFilter(&Opt)
return err
}

// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOptionReload("filter", &Opt, Reload)
rc.AddOption("filter", &Opt)
flags.BoolVarP(flagSet, &Opt.DeleteExcluded, "delete-excluded", "", false, "Delete files on dest excluded from sync")
flags.StringArrayVarP(flagSet, &Opt.FilterRule, "filter", "f", nil, "Add a file-filtering rule")
flags.StringArrayVarP(flagSet, &Opt.FilterFrom, "filter-from", "", nil, "Read filtering patterns from a file")

@@ -167,15 +167,3 @@ func globToDirGlobs(glob string) (out []string) {

return out
}

// globBoundedRecursion returns true if the glob only needs bounded
// recursion in the file tree to evaluate.
func globBoundedRecursion(glob string) bool {
if strings.Contains(glob, "**") {
return false
}
if strings.HasPrefix(glob, "/") {
return true
}
return false
}

@@ -108,45 +108,3 @@ func TestGlobToDirGlobs(t *testing.T) {
assert.Equal(t, test.want, got, test.in)
}
}

func TestGlobBoundedRecursion(t *testing.T) {
for _, test := range []struct {
in string
want bool
}{
{`*`, false},
{`/*`, true},
{`/**`, false},
{`*.jpg`, false},
{`/*.jpg`, true},
{`/a/*.jpg`, true},
{`/a/b/*.jpg`, true},
{`*/*/*.jpg`, false},
{`a/b/`, false},
{`a/b`, false},
{`a/b/*.{png,gif}`, false},
{`/a/{jpg,png,gif}/*.{jpg,true,gif}`, true},
{`a/{a,a*b,a**c}/d/`, false},
{`/a/{a,a*b,a/c,d}/d/`, true},
{`**`, false},
{`a**`, false},
{`a**b`, false},
{`a**b**c**d`, false},
{`a**b/c**d`, false},
{`/A/a**b/B/c**d/C/`, false},
{`/var/spool/**/ncw`, false},
{`var/spool/**/ncw/`, false},
{"/file1.jpg", true},
{"/file2.png", true},
{"/*.jpg", true},
{"/*.png", true},
{"/potato", true},
{"/sausage1", true},
{"/sausage2*", true},
{"/sausage3**", false},
{"/a/*.jpg", true},
} {
got := globBoundedRecursion(test.in)
assert.Equal(t, test.want, got, test.in)
}
}

83
fs/fs.go
@@ -16,10 +16,8 @@ import (

"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
)

@@ -61,7 +59,7 @@ var (
ErrorNotAFile = errors.New("is not a regular file")
ErrorNotDeleting = errors.New("not deleting files as there were IO errors")
ErrorNotDeletingDirs = errors.New("not deleting directories as there were IO errors")
ErrorOverlapping = errors.New("can't sync or move files on overlapping remotes")
ErrorCantMoveOverlapping = errors.New("can't move files on overlapping remotes")
ErrorDirectoryNotEmpty = errors.New("directory not empty")
ErrorImmutableModified = errors.New("immutable file modified")
ErrorPermissionDenied = errors.New("permission denied")
@@ -409,7 +407,6 @@ type Features struct {
BucketBased bool // is bucket based (like s3, swift etc)
SetTier bool // allows set tier functionality on objects
GetTier bool // allows to retrieve storage tier of objects
ServerSideAcrossConfigs bool // can server side copy between different remotes of the same type

// Purge all files in the root and the root directory
//
@@ -1115,81 +1112,3 @@ func GetModifyWindow(fss ...Info) time.Duration {
}
return window
}

// Pacer is a simple wrapper around a pacer.Pacer with logging.
type Pacer struct {
*pacer.Pacer
}

type logCalculator struct {
pacer.Calculator
}

// NewPacer creates a Pacer for the given Fs and Calculator.
func NewPacer(c pacer.Calculator) *Pacer {
p := &Pacer{
Pacer: pacer.New(
pacer.InvokerOption(pacerInvoker),
pacer.MaxConnectionsOption(Config.Checkers+Config.Transfers),
pacer.RetriesOption(Config.LowLevelRetries),
pacer.CalculatorOption(c),
),
}
p.SetCalculator(c)
return p
}

func (d *logCalculator) Calculate(state pacer.State) time.Duration {
oldSleepTime := state.SleepTime
newSleepTime := d.Calculator.Calculate(state)
if state.ConsecutiveRetries > 0 {
if newSleepTime != oldSleepTime {
Debugf("pacer", "Rate limited, increasing sleep to %v", newSleepTime)
}
} else {
if newSleepTime != oldSleepTime {
Debugf("pacer", "Reducing sleep to %v", newSleepTime)
}
}
return newSleepTime
}

// SetCalculator sets the pacing algorithm. Don't modify the Calculator object
// afterwards, use the ModifyCalculator method when needed.
//
// It will choose the default algorithm if nil is passed in.
func (p *Pacer) SetCalculator(c pacer.Calculator) {
switch c.(type) {
case *logCalculator:
Logf("pacer", "Invalid Calculator in fs.Pacer.SetCalculator")
case nil:
c = &logCalculator{pacer.NewDefault()}
default:
c = &logCalculator{c}
}

p.Pacer.SetCalculator(c)
}

// ModifyCalculator calls the given function with the currently configured
// Calculator and the Pacer lock held.
func (p *Pacer) ModifyCalculator(f func(pacer.Calculator)) {
p.Pacer.ModifyCalculator(func(c pacer.Calculator) {
switch _c := c.(type) {
case *logCalculator:
f(_c.Calculator)
default:
Logf("pacer", "Invalid Calculator in fs.Pacer: %t", c)
f(c)
}
})
}

func pacerInvoker(try, retries int, f pacer.Paced) (retry bool, err error) {
retry, err = f()
if retry {
Debugf("pacer", "low level retry %d/%d (error %v)", try, retries, err)
err = fserrors.RetryError(err)
}
return
}

@@ -2,15 +2,8 @@ package fs

import (
"strings"
"sync"
"testing"
"time"

"github.com/stretchr/testify/require"

"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
)
@@ -77,47 +70,3 @@ func TestOption(t *testing.T) {
err = d.Set("sdfsdf")
assert.Error(t, err)
}

var errFoo = errors.New("foo")

type dummyPaced struct {
retry bool
called int
wait *sync.Cond
}

func (dp *dummyPaced) fn() (bool, error) {
if dp.wait != nil {
dp.wait.L.Lock()
dp.wait.Wait()
dp.wait.L.Unlock()
}
dp.called++
return dp.retry, errFoo
}

func TestPacerCall(t *testing.T) {
expectedCalled := Config.LowLevelRetries
if expectedCalled == 0 {
expectedCalled = 20
Config.LowLevelRetries = expectedCalled
defer func() {
Config.LowLevelRetries = 0
}()
}
p := NewPacer(pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))

dp := &dummyPaced{retry: true}
err := p.Call(dp.fn)
require.Equal(t, expectedCalled, dp.called)
require.Implements(t, (*fserrors.Retrier)(nil), err)
}

func TestPacerCallNoRetry(t *testing.T) {
p := NewPacer(pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))

dp := &dummyPaced{retry: true}
err := p.CallNoRetry(dp.fn)
require.Equal(t, 1, dp.called)
require.Implements(t, (*fserrors.Retrier)(nil), err)
}

@@ -194,7 +194,7 @@ func Cause(cause error) (retriable bool, err error) {
// this case.
err = prev
}
if reflect.DeepEqual(err, prev) {
if err == prev {
// Unpack any struct or *struct with a field
// of name Err which satisfies the error
// interface. This includes *url.Error,
@@ -215,7 +215,7 @@ func Cause(cause error) (retriable bool, err error) {
}
}
}
if reflect.DeepEqual(err, prev) {
if err == prev {
break
}
}
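
Both hunks express the same idea: keep unwrapping until a pass makes no progress, with plain identity comparison replacing the more expensive `reflect.DeepEqual`. A minimal sketch of that fixed-point loop (`unwrapOnce` here is a hypothetical stand-in for the per-level logic in `Cause`):

```
package main

import "fmt"

// unwrap applies one unwrapping step repeatedly until the error stops
// changing; comparing against the previous value detects the fixed point.
func unwrap(err error, unwrapOnce func(error) error) error {
	for {
		prev := err
		err = unwrapOnce(err)
		if err == prev { // no progress this pass: done
			return err
		}
	}
}

type wrapped struct{ inner error }

func (w *wrapped) Error() string { return "wrapped: " + w.inner.Error() }

func main() {
	leaf := fmt.Errorf("leaf")
	err := error(&wrapped{inner: &wrapped{inner: leaf}})
	unwrapOnce := func(e error) error {
		if w, ok := e.(*wrapped); ok {
			return w.inner
		}
		return e
	}
	fmt.Println(unwrap(err, unwrapOnce)) // leaf
}
```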

@@ -191,22 +191,25 @@ var _ pflag.Value = (*DeduplicateMode)(nil)

// dedupeFindDuplicateDirs scans f for duplicate directories
func dedupeFindDuplicateDirs(f fs.Fs) ([][]fs.Directory, error) {
dirs := map[string][]fs.Directory{}
err := walk.ListR(f, "", true, fs.Config.MaxDepth, walk.ListDirs, func(entries fs.DirEntries) error {
duplicateDirs := [][]fs.Directory{}
err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
dirs := map[string][]fs.Directory{}
entries.ForDir(func(d fs.Directory) {
dirs[d.Remote()] = append(dirs[d.Remote()], d)
})
for _, ds := range dirs {
if len(ds) > 1 {
duplicateDirs = append(duplicateDirs, ds)
}
}
return nil
})
if err != nil {
return nil, errors.Wrap(err, "find duplicate dirs")
}
duplicateDirs := [][]fs.Directory{}
for _, ds := range dirs {
if len(ds) > 1 {
duplicateDirs = append(duplicateDirs, ds)
}
}
return duplicateDirs, nil
}

@@ -265,7 +268,10 @@ func Deduplicate(f fs.Fs, mode DeduplicateMode) error {

// Now find duplicate files
files := map[string][]fs.Object{}
err := walk.ListR(f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
entries.ForObject(func(o fs.Object) {
remote := o.Remote()
files[remote] = append(files[remote], o)

@@ -161,7 +161,10 @@ func TestDeduplicateRename(t *testing.T) {
err := operations.Deduplicate(r.Fremote, operations.DeduplicateRename)
require.NoError(t, err)

require.NoError(t, walk.ListR(r.Fremote, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
require.NoError(t, walk.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
entries.ForObject(func(o fs.Object) {
remote := o.Remote()
if remote != "one-1.txt" &&

@@ -89,7 +89,12 @@ func ListJSON(fsrc fs.Fs, remote string, opt *ListJSONOpt, callback func(*ListJS
}
}
format := formatForPrecision(fsrc.Precision())
err := walk.ListR(fsrc, remote, false, ConfigMaxDepth(opt.Recurse), walk.ListAll, func(entries fs.DirEntries) (err error) {
err := walk.Walk(fsrc, remote, false, ConfigMaxDepth(opt.Recurse), func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
fs.CountError(err)
fs.Errorf(dirPath, "error listing: %v", err)
return nil
}
for _, entry := range entries {
item := ListJSONItem{
Path: entry.Remote(),

@@ -10,7 +10,6 @@ import (
"io/ioutil"
"net/http"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
@@ -273,7 +272,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec
// Try server side copy first - if has optional interface and
// is same underlying remote
actionTaken = "Copied (server side copy)"
if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
if doCopy := f.Features().Copy; doCopy != nil && SameConfig(src.Fs(), f) {
newDst, err = doCopy(src, remote)
if err == nil {
dst = newDst
@@ -392,7 +391,7 @@ func Move(fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Ob
return newDst, nil
}
// See if we have Move available
if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
if doMove := fdst.Features().Move; doMove != nil && SameConfig(src.Fs(), fdst) {
// Delete destination if it exists
if dst != nil {
err = DeleteFile(dst)
@@ -435,20 +434,6 @@ func CanServerSideMove(fdst fs.Fs) bool {
return canMove || canCopy
}

// SuffixName adds the current --suffix to the remote, obeying
// --suffix-keep-extension if set
func SuffixName(remote string) string {
if fs.Config.Suffix == "" {
return remote
}
if fs.Config.SuffixKeepExtension {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
return base + fs.Config.Suffix + ext
}
return remote + fs.Config.Suffix
}

// DeleteFileWithBackupDir deletes a single file respecting --dry-run
// and accumulating stats and errors.
//
@@ -470,7 +455,7 @@ func DeleteFileWithBackupDir(dst fs.Object, backupDir fs.Fs) (err error) {
if !SameConfig(dst.Fs(), backupDir) {
err = errors.New("parameter to --backup-dir has to be on the same remote as destination")
} else {
remoteWithSuffix := SuffixName(dst.Remote())
remoteWithSuffix := dst.Remote() + fs.Config.Suffix
overwritten, _ := backupDir.NewObject(remoteWithSuffix)
_, err = Move(backupDir, overwritten, remoteWithSuffix, dst)
}
@@ -539,11 +524,6 @@ func DeleteFiles(toBeDeleted fs.ObjectsChan) error {
return DeleteFilesWithBackupDir(toBeDeleted, nil)
}

// SameRemoteType returns true if fdst and fsrc are the same type
func SameRemoteType(fdst, fsrc fs.Info) bool {
return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc)
}

// SameConfig returns true if fdst and fsrc are using the same config
// file entry
func SameConfig(fdst, fsrc fs.Info) bool {
@@ -552,7 +532,7 @@ func SameConfig(fdst, fsrc fs.Info) bool {

// Same returns true if fdst and fsrc point to the same underlying Fs
func Same(fdst, fsrc fs.Info) bool {
return SameConfig(fdst, fsrc) && strings.Trim(fdst.Root(), "/") == strings.Trim(fsrc.Root(), "/")
return SameConfig(fdst, fsrc) && fdst.Root() == fsrc.Root()
}

// Overlapping returns true if fdst and fsrc point to the same
@@ -563,7 +543,7 @@ func Overlapping(fdst, fsrc fs.Info) bool {
}
// Return the Root with a trailing / if not empty
fixedRoot := func(f fs.Info) string {
s := strings.Trim(filepath.ToSlash(f.Root()), "/")
s := strings.Trim(f.Root(), "/")
if s != "" {
s += "/"
}
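
The helper above normalises roots before what is presumably a prefix comparison between the two remotes; a tiny runnable illustration (paths invented) of why the trailing slash matters:

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Without the trailing slash, the sibling "root2" would look like
	// it lives inside "root"; with it, only real children match.
	fmt.Println(strings.HasPrefix("root2/file", "root"))  // true (misleading)
	fmt.Println(strings.HasPrefix("root2/file", "root/")) // false (correct)
	fmt.Println(strings.HasPrefix("root/toot", "root/"))  // true (overlapping)
}
```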
@@ -830,7 +810,11 @@ func CheckDownload(fdst, fsrc fs.Fs, oneway bool) error {
//
// Lists in parallel which may get them out of order
func ListFn(f fs.Fs, fn func(fs.Object)) error {
return walk.ListR(f, "", false, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
return walk.Walk(f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
// FIXME count errors and carry on for listing
return err
}
entries.ForObject(fn)
return nil
})
@@ -946,7 +930,11 @@ func ConfigMaxDepth(recursive bool) int {

// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(f fs.Fs, w io.Writer) error {
return walk.ListR(f, "", false, ConfigMaxDepth(false), walk.ListDirs, func(entries fs.DirEntries) error {
return walk.Walk(f, "", false, ConfigMaxDepth(false), func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
// FIXME count errors and carry on for listing
return err
}
entries.ForDir(func(dir fs.Directory) {
if dir != nil {
syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime().Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote())
@@ -1054,17 +1042,21 @@ func listToChan(f fs.Fs, dir string) fs.ObjectsChan {
o := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
defer close(o)
err := walk.ListR(f, dir, true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
_ = walk.Walk(f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
if err == fs.ErrorDirNotFound {
return nil
}
err = errors.Errorf("Failed to list: %v", err)
fs.CountError(err)
fs.Errorf(nil, "%v", err)
return nil
}
entries.ForObject(func(obj fs.Object) {
o <- obj
})
return nil
})
if err != nil && err != fs.ErrorDirNotFound {
err = errors.Wrap(err, "failed to list")
fs.CountError(err)
fs.Errorf(nil, "%v", err)
}
}()
return o
}
@@ -1487,7 +1479,8 @@ type ListFormat struct {
separator string
dirSlash bool
absolute bool
output []func(entry *ListJSONItem) string
output []func() string
entry fs.DirEntry
csv *csv.Writer
buf bytes.Buffer
}
@@ -1523,91 +1516,76 @@ func (l *ListFormat) SetCSV(useCSV bool) {
}

// SetOutput sets functions used to create files information
func (l *ListFormat) SetOutput(output []func(entry *ListJSONItem) string) {
func (l *ListFormat) SetOutput(output []func() string) {
l.output = output
}

// AddModTime adds file's Mod Time to output
func (l *ListFormat) AddModTime() {
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.ModTime.When.Local().Format("2006-01-02 15:04:05")
})
l.AppendOutput(func() string { return l.entry.ModTime().Local().Format("2006-01-02 15:04:05") })
}

// AddSize adds file's size to output
func (l *ListFormat) AddSize() {
l.AppendOutput(func(entry *ListJSONItem) string {
return strconv.FormatInt(entry.Size, 10)
l.AppendOutput(func() string {
return strconv.FormatInt(l.entry.Size(), 10)
})
}

// normalisePath makes sure the path has the correct slashes for the current mode
func (l *ListFormat) normalisePath(entry *ListJSONItem, remote string) string {
if l.absolute && !strings.HasPrefix(remote, "/") {
remote = "/" + remote
}
if entry.IsDir && l.dirSlash {
remote += "/"
}
return remote
}

// AddPath adds path to file to output
func (l *ListFormat) AddPath() {
l.AppendOutput(func(entry *ListJSONItem) string {
return l.normalisePath(entry, entry.Path)
})
}

// AddEncrypted adds the encrypted path to file to output
func (l *ListFormat) AddEncrypted() {
l.AppendOutput(func(entry *ListJSONItem) string {
return l.normalisePath(entry, entry.Encrypted)
l.AppendOutput(func() string {
remote := l.entry.Remote()
if l.absolute && !strings.HasPrefix(remote, "/") {
remote = "/" + remote
}
_, isDir := l.entry.(fs.Directory)
if isDir && l.dirSlash {
remote += "/"
}
return remote
})
}

// AddHash adds the hash of the type given to the output
func (l *ListFormat) AddHash(ht hash.Type) {
hashName := ht.String()
l.AppendOutput(func(entry *ListJSONItem) string {
if entry.IsDir {
l.AppendOutput(func() string {
o, ok := l.entry.(fs.Object)
if !ok {
return ""
}
return entry.Hashes[hashName]
return hashSum(ht, o)
})
}

// AddID adds file's ID to the output if known
func (l *ListFormat) AddID() {
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.ID
})
}

// AddOrigID adds file's Original ID to the output if known
func (l *ListFormat) AddOrigID() {
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.OrigID
l.AppendOutput(func() string {
if do, ok := l.entry.(fs.IDer); ok {
return do.ID()
}
return ""
})
}

// AddMimeType adds file's MimeType to the output if known
func (l *ListFormat) AddMimeType() {
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.MimeType
l.AppendOutput(func() string {
return fs.MimeTypeDirEntry(l.entry)
})
}

// AppendOutput adds string generated by specific function to printed output
func (l *ListFormat) AppendOutput(functionToAppend func(item *ListJSONItem) string) {
func (l *ListFormat) AppendOutput(functionToAppend func() string) {
l.output = append(l.output, functionToAppend)
}

// Format prints information about the DirEntry in the format defined
func (l *ListFormat) Format(entry *ListJSONItem) (result string) {
func (l *ListFormat) Format(entry fs.DirEntry) (result string) {
l.entry = entry
var out []string
for _, fun := range l.output {
out = append(out, fun(entry))
out = append(out, fun())
}
if l.csv != nil {
l.buf.Reset()

@@ -39,6 +39,7 @@ import (
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/filter"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/list"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -231,33 +232,6 @@ func TestHashSums(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSuffixName(t *testing.T) {
|
||||
origSuffix, origKeepExt := fs.Config.Suffix, fs.Config.SuffixKeepExtension
|
||||
defer func() {
|
||||
fs.Config.Suffix, fs.Config.SuffixKeepExtension = origSuffix, origKeepExt
|
||||
}()
|
||||
for _, test := range []struct {
|
||||
remote string
|
||||
suffix string
|
||||
keepExt bool
|
||||
want string
|
||||
}{
|
||||
{"test.txt", "", false, "test.txt"},
|
||||
{"test.txt", "", true, "test.txt"},
|
||||
{"test.txt", "-suffix", false, "test.txt-suffix"},
|
||||
{"test.txt", "-suffix", true, "test-suffix.txt"},
|
||||
{"test.txt.csv", "-suffix", false, "test.txt.csv-suffix"},
|
||||
{"test.txt.csv", "-suffix", true, "test.txt-suffix.csv"},
|
||||
{"test", "-suffix", false, "test-suffix"},
|
||||
{"test", "-suffix", true, "test-suffix"},
|
||||
} {
|
||||
fs.Config.Suffix = test.suffix
|
||||
fs.Config.SuffixKeepExtension = test.keepExt
|
||||
got := operations.SuffixName(test.remote)
|
||||
assert.Equal(t, test.want, got, fmt.Sprintf("%+v", test))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCount(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
@@ -804,7 +778,6 @@ func TestSame(t *testing.T) {
|
||||
|
||||
func TestOverlapping(t *testing.T) {
|
||||
a := &testFsInfo{name: "name", root: "root"}
|
||||
slash := string(os.PathSeparator) // native path separator
|
||||
for _, test := range []struct {
|
||||
name string
|
||||
root string
|
||||
@@ -817,8 +790,6 @@ func TestOverlapping(t *testing.T) {
|
||||
{"name", "roo", false},
|
||||
{"name", "root/toot", true},
|
||||
{"name", "root/toot/", true},
|
||||
{"name", "root" + slash + "toot", true},
|
||||
{"name", "root" + slash + "toot" + slash, true},
|
||||
{"name", "", true},
|
||||
{"name", "/", true},
|
||||
} {
|
||||
@@ -902,90 +873,61 @@ func TestCheckEqualReaders(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestListFormat(t *testing.T) {
item0 := &operations.ListJSONItem{
Path: "a",
Name: "a",
Encrypted: "encryptedFileName",
Size: 1,
MimeType: "application/octet-stream",
ModTime: operations.Timestamp{
When: t1,
Format: "2006-01-02T15:04:05.000000000Z07:00"},
IsDir: false,
Hashes: map[string]string{
"MD5": "0cc175b9c0f1b6a831c399e269772661",
"SHA-1": "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8",
"DropboxHash": "bf5d3affb73efd2ec6c36ad3112dd933efed63c4e1cbffcfa88e2759c144f2d8",
"QuickXorHash": "6100000000000000000000000100000000000000"},
ID: "fileID",
OrigID: "fileOrigID",
}
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteObject("a", "a", t1)
file2 := r.WriteObject("subdir/b", "b", t1)

item1 := &operations.ListJSONItem{
Path: "subdir",
Name: "subdir",
Encrypted: "encryptedDirName",
Size: -1,
MimeType: "inode/directory",
ModTime: operations.Timestamp{
When: t2,
Format: "2006-01-02T15:04:05.000000000Z07:00"},
IsDir: true,
Hashes: map[string]string(nil),
ID: "dirID",
OrigID: "dirOrigID",
}
fstest.CheckItems(t, r.Fremote, file1, file2)

items, _ := list.DirSorted(r.Fremote, true, "")
var list operations.ListFormat
list.AddPath()
list.SetDirSlash(false)
assert.Equal(t, "subdir", list.Format(item1))
assert.Equal(t, "subdir", list.Format(items[1]))

list.SetDirSlash(true)
assert.Equal(t, "subdir/", list.Format(item1))
assert.Equal(t, "subdir/", list.Format(items[1]))

list.SetOutput(nil)
assert.Equal(t, "", list.Format(item1))
assert.Equal(t, "", list.Format(items[1]))

list.AppendOutput(func(item *operations.ListJSONItem) string { return "a" })
list.AppendOutput(func(item *operations.ListJSONItem) string { return "b" })
assert.Equal(t, "ab", list.Format(item1))
list.AppendOutput(func() string { return "a" })
list.AppendOutput(func() string { return "b" })
assert.Equal(t, "ab", list.Format(items[1]))
list.SetSeparator(":::")
assert.Equal(t, "a:::b", list.Format(item1))
assert.Equal(t, "a:::b", list.Format(items[1]))

list.SetOutput(nil)
list.AddModTime()
assert.Equal(t, t1.Local().Format("2006-01-02 15:04:05"), list.Format(item0))
assert.Equal(t, items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0]))

list.SetOutput(nil)
list.SetSeparator("|")
list.AddID()
list.AddOrigID()
assert.Equal(t, "fileID|fileOrigID", list.Format(item0))
assert.Equal(t, "dirID|dirOrigID", list.Format(item1))
_ = list.Format(items[0]) // Can't really check anything - at least it didn't panic!

list.SetOutput(nil)
list.AddMimeType()
assert.Contains(t, list.Format(item0), "/")
assert.Equal(t, "inode/directory", list.Format(item1))
assert.Contains(t, list.Format(items[0]), "/")
assert.Equal(t, "inode/directory", list.Format(items[1]))

list.SetOutput(nil)
list.AddPath()
list.SetAbsolute(true)
assert.Equal(t, "/a", list.Format(item0))
assert.Equal(t, "/a", list.Format(items[0]))
list.SetAbsolute(false)
assert.Equal(t, "a", list.Format(item0))
assert.Equal(t, "a", list.Format(items[0]))

list.SetOutput(nil)
list.AddSize()
assert.Equal(t, "1", list.Format(item0))
assert.Equal(t, "1", list.Format(items[0]))

list.AddPath()
list.AddModTime()
list.SetDirSlash(true)
list.SetSeparator("__SEP__")
assert.Equal(t, "1__SEP__a__SEP__"+t1.Local().Format("2006-01-02 15:04:05"), list.Format(item0))
assert.Equal(t, "-1__SEP__subdir/__SEP__"+t2.Local().Format("2006-01-02 15:04:05"), list.Format(item1))
assert.Equal(t, "1__SEP__a__SEP__"+items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0]))
assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"__SEP__subdir/__SEP__"+items[1].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[1]))

for _, test := range []struct {
ht hash.Type
@@ -997,7 +939,10 @@ func TestListFormat(t *testing.T) {
} {
list.SetOutput(nil)
list.AddHash(test.ht)
assert.Equal(t, test.want, list.Format(item0))
got := list.Format(items[0])
if got != "UNSUPPORTED" && got != "" {
assert.Equal(t, test.want, got)
}
}

list.SetOutput(nil)
@@ -1007,15 +952,8 @@ func TestListFormat(t *testing.T) {
list.AddPath()
list.AddModTime()
list.SetDirSlash(true)
assert.Equal(t, "1|a|"+t1.Local().Format("2006-01-02 15:04:05"), list.Format(item0))
assert.Equal(t, "-1|subdir/|"+t2.Local().Format("2006-01-02 15:04:05"), list.Format(item1))

list.SetOutput(nil)
list.SetSeparator("|")
list.AddPath()
list.AddEncrypted()
assert.Equal(t, "a|encryptedFileName", list.Format(item0))
assert.Equal(t, "subdir/|encryptedDirName/", list.Format(item1))
assert.Equal(t, "1|a|"+items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0]))
assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"|subdir/|"+items[1].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[1]))

}
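
The test above doubles as a reference for the ListFormat API. A minimal sketch of the same calls outside the test harness (the item literal here is illustrative, not taken from this diff):

// Build a formatter: size, then path, "|" separated, with
// directories marked by a trailing slash.
var lf operations.ListFormat
lf.SetSeparator("|")
lf.SetDirSlash(true)
lf.AddSize()
lf.AddPath()

// A hypothetical directory entry shaped like item1 above.
item := &operations.ListJSONItem{Path: "subdir", IsDir: true, Size: -1}
fmt.Println(lf.Format(item)) // prints "-1|subdir/"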

@@ -172,7 +172,7 @@ See the [` + op.name + ` command](/commands/rclone_` + op.name + `/) command for
}
}

// Run a single command, eg Mkdir
// Mkdir a directory
func rcSingleCommand(in rc.Params, name string, noRemote bool) (out rc.Params, err error) {
var (
f fs.Fs
@@ -240,7 +240,7 @@ See the [size command](/commands/rclone_size/) command for more information on t
})
}

// Size a directory
// Mkdir a directory
func rcSize(in rc.Params) (out rc.Params, err error) {
f, err := rc.GetFs(in)
if err != nil {
@@ -255,38 +255,3 @@ func rcSize(in rc.Params) (out rc.Params, err error) {
out["bytes"] = bytes
return out, nil
}

func init() {
rc.Add(rc.Call{
Path: "operations/publiclink",
AuthRequired: true,
Fn: rcPublicLink,
Title: "Create or retrieve a public link to the given file or folder.",
Help: `This takes the following parameters

- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"

Returns

- url - URL of the resource

See the [link command](/commands/rclone_link/) for more information on the above.
`,
})
}

// Make a public link
func rcPublicLink(in rc.Params) (out rc.Params, err error) {
f, remote, err := rc.GetFsAndRemote(in)
if err != nil {
return nil, err
}
url, err := PublicLink(f, remote)
if err != nil {
return nil, err
}
out = make(rc.Params)
out["url"] = url
return out, nil
}
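
For reference, a hedged sketch of driving this handler directly with rc.Params, as TestRcPublicLink does below; "drive:" and "file.txt" are placeholders, not values from this diff:

in := rc.Params{
	"fs":     "drive:",
	"remote": "file.txt",
}
out, err := rcPublicLink(in)
if err != nil {
	// Backends without link support fail with "doesn't support public links".
	return err
}
fmt.Println(out["url"]) // the URL of the resource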

@@ -356,16 +356,3 @@ func TestRcSize(t *testing.T) {
"bytes": int64(120),
}, out)
}

// operations/publiclink: Create or retrieve a public link to the given file or folder.
func TestRcPublicLink(t *testing.T) {
r, call := rcNewRun(t, "operations/publiclink")
defer r.Finalise()
in := rc.Params{
"fs": r.FremoteName,
"remote": "",
}
_, err := call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "doesn't support public links")
}

@@ -8,23 +8,13 @@ import (
"github.com/pkg/errors"
)

var (
optionBlock = map[string]interface{}{}
optionReload = map[string]func() error{}
)
var optionBlock = map[string]interface{}{}

// AddOption adds an option set
func AddOption(name string, option interface{}) {
optionBlock[name] = option
}

// AddOptionReload adds an option set with a reload function to be
// called when options are changed
func AddOptionReload(name string, option interface{}, reload func() error) {
optionBlock[name] = option
optionReload[name] = reload
}
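
A sketch of how a package would use these registration hooks; the block name "mylib" and the options struct are illustrative only:

type libOptions struct {
	Int    int
	String string
}

var opt libOptions

func init() {
	// Register the block so options/get and options/set can see it,
	// and re-apply values whenever options/set writes to the block.
	AddOptionReload("mylib", &opt, func() error {
		// react to the new values in opt here
		return nil
	})
}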

func init() {
Add(Call{
Path: "options/blocks",
@@ -113,12 +103,7 @@ func rcOptionsSet(in Params) (out Params, err error) {
if err != nil {
return nil, errors.Wrapf(err, "failed to write options from block %q", name)
}
if reload := optionReload[name]; reload != nil {
err = reload()
if err != nil {
return nil, errors.Wrapf(err, "failed to reload options from block %q", name)
}
}

}
return out, nil
}

@@ -1,10 +1,8 @@
package rc

import (
"fmt"
"testing"

"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -26,21 +24,9 @@ func TestAddOption(t *testing.T) {
assert.Equal(t, len(optionBlock), 0)
AddOption("potato", &testOptions)
assert.Equal(t, len(optionBlock), 1)
assert.Equal(t, len(optionReload), 0)
assert.Equal(t, &testOptions, optionBlock["potato"])
}

func TestAddOptionReload(t *testing.T) {
defer clearOptionBlock()
assert.Equal(t, len(optionBlock), 0)
reload := func() error { return nil }
AddOptionReload("potato", &testOptions, reload)
assert.Equal(t, len(optionBlock), 1)
assert.Equal(t, len(optionReload), 1)
assert.Equal(t, &testOptions, optionBlock["potato"])
assert.Equal(t, fmt.Sprintf("%p", reload), fmt.Sprintf("%p", optionReload["potato"]))
}

func TestOptionsBlocks(t *testing.T) {
defer clearOptionBlock()
AddOption("potato", &testOptions)
@@ -67,14 +53,7 @@ func TestOptionsGet(t *testing.T) {

func TestOptionsSet(t *testing.T) {
defer clearOptionBlock()
var reloaded int
AddOptionReload("potato", &testOptions, func() error {
if reloaded > 0 {
return errors.New("error while reloading")
}
reloaded++
return nil
})
AddOption("potato", &testOptions)
call := Calls.Get("options/set")
require.NotNil(t, call)

@@ -88,12 +67,6 @@ func TestOptionsSet(t *testing.T) {
require.Nil(t, out)
assert.Equal(t, 50, testOptions.Int)
assert.Equal(t, "hello", testOptions.String)
assert.Equal(t, 1, reloaded)

// error from reload
_, err = call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "error while reloading")

// unknown option block
in = Params{
@@ -112,5 +85,4 @@ func TestOptionsSet(t *testing.T) {
_, err = call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to write options")

}
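
The assertions above imply the parameter shape for options/set: each top-level key names a registered block and the nested values are written into that block's struct before its reload hook runs. A sketch of the `in` the elided hunk presumably builds (the shape is inferred, not shown in this diff):

in := Params{
	"potato": map[string]interface{}{
		"Int":    50,
		"String": "hello",
	},
}
out, err := call.Fn(in) // out is nil on success, per the test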

@@ -188,8 +188,8 @@ func rcJobStatus(in Params) (out Params, err error) {
defer job.mu.Unlock()
out = make(Params)
err = Reshape(&out, job)
if err != nil {
return nil, errors.Wrap(err, "reshape failed in job status")
if job == nil {
return nil, errors.New("Reshape failed in job status")
}
return out, nil
}

@@ -39,21 +39,17 @@ func rcSyncCopyMove(in rc.Params, name string) (out rc.Params, err error) {
if err != nil {
return nil, err
}
createEmptySrcDirs, err := in.GetBool("createEmptySrcDirs")
if rc.NotErrParamNotFound(err) {
return nil, err
}
switch name {
case "sync":
return nil, Sync(dstFs, srcFs, createEmptySrcDirs)
return nil, Sync(dstFs, srcFs)
case "copy":
return nil, CopyDir(dstFs, srcFs, createEmptySrcDirs)
return nil, CopyDir(dstFs, srcFs)
case "move":
deleteEmptySrcDirs, err := in.GetBool("deleteEmptySrcDirs")
if rc.NotErrParamNotFound(err) {
return nil, err
}
return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs, createEmptySrcDirs)
return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs)
}
panic("unknown rcSyncCopyMove type")
}
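
A sketch of the parameters this dispatcher reads. The two booleans come from the GetBool calls above; the srcFs/dstFs key names are an assumption here, since the Fs extraction sits in the elided part of the hunk:

in := rc.Params{
	"srcFs":              "drive:src", // assumed key name, placeholder value
	"dstFs":              "drive:dst", // assumed key name, placeholder value
	"createEmptySrcDirs": true,        // read for all verbs on the four-argument side of this diff
	"deleteEmptySrcDirs": true,        // only consulted for "move"
}
_, err := rcSyncCopyMove(in, "move")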

@@ -24,7 +24,6 @@ type syncCopyMove struct {
fsrc fs.Fs
deleteMode fs.DeleteMode // how we are doing deletions
DoMove bool
copyEmptySrcDirs bool
deleteEmptySrcDirs bool
dir string
// internal state
@@ -64,16 +63,12 @@ type syncCopyMove struct {
suffix string // suffix to add to files placed in backupDir
}

func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) {
if (deleteMode != fs.DeleteModeOff || DoMove) && operations.Overlapping(fdst, fsrc) {
return nil, fserrors.FatalError(fs.ErrorOverlapping)
}
func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) (*syncCopyMove, error) {
s := &syncCopyMove{
fdst: fdst,
fsrc: fsrc,
deleteMode: deleteMode,
DoMove: DoMove,
copyEmptySrcDirs: copyEmptySrcDirs,
deleteEmptySrcDirs: deleteEmptySrcDirs,
dir: "",
srcFilesChan: make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers),
@@ -226,7 +221,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, wg *sync.WaitGroup) {
} else {
// If destination already exists, then we must move it into --backup-dir if required
if pair.Dst != nil && s.backupDir != nil {
remoteWithSuffix := operations.SuffixName(pair.Dst.Remote())
remoteWithSuffix := pair.Dst.Remote() + s.suffix
overwritten, _ := s.backupDir.NewObject(remoteWithSuffix)
_, err := operations.Move(s.backupDir, overwritten, remoteWithSuffix, pair.Dst)
if err != nil {
@@ -691,9 +686,7 @@ func (s *syncCopyMove) run() error {
s.stopTransfers()
s.stopDeleters()

if s.copyEmptySrcDirs {
s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs))
}
s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs))

// Delete files after
if s.deleteMode == fs.DeleteModeAfter {
@@ -856,7 +849,7 @@ func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
// If DoMove is true then files will be moved instead of copied
//
// dir is the start directory, "" for root
func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) error {
if deleteMode != fs.DeleteModeOff && DoMove {
return fserrors.FatalError(errors.New("can't delete and move at the same time"))
}
@@ -866,7 +859,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
}
// only delete stuff during this pass
do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs)
do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs)
if err != nil {
return err
}
@@ -877,7 +870,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
// Next pass does a copy only
deleteMode = fs.DeleteModeOff
}
do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs)
do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs)
if err != nil {
return err
}
@@ -885,22 +878,22 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
}

// Sync fsrc into fdst
func Sync(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false, copyEmptySrcDirs)
func Sync(fdst, fsrc fs.Fs) error {
return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false)
}

// CopyDir copies fsrc into fdst
func CopyDir(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs)
func CopyDir(fdst, fsrc fs.Fs) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false)
}

// moveDir moves fsrc into fdst
func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs)
func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs)
}

// MoveDir moves fsrc into fdst
func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
if operations.Same(fdst, fsrc) {
fs.Errorf(fdst, "Nothing to do as source and destination are the same")
return nil
@@ -927,6 +920,13 @@ func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) e
}
}

// The two remotes mustn't overlap if we didn't do server side move
if operations.Overlapping(fdst, fsrc) {
err := fs.ErrorCantMoveOverlapping
fs.Errorf(fdst, "%v", err)
return err
}

// Otherwise move the files one by one
return moveDir(fdst, fsrc, deleteEmptySrcDirs, copyEmptySrcDirs)
return moveDir(fdst, fsrc, deleteEmptySrcDirs)
}
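
The exported entry points, in a minimal hedged sketch (two-argument form as on this branch; the remote strings are placeholders):

fdst, err := fs.NewFs("remote:backup")
if err != nil {
	return err
}
fsrc, err := fs.NewFs("/home/user/data")
if err != nil {
	return err
}
// CopyDir copies without deleting anything at the destination;
// Sync also deletes destination files absent from the source.
if err := CopyDir(fdst, fsrc); err != nil {
	return err
}
return Sync(fdst, fsrc)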

@@ -11,7 +11,6 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest"
@@ -40,7 +39,7 @@ func TestCopyWithDryRun(t *testing.T) {
r.Mkdir(r.Fremote)

fs.Config.DryRun = true
err := CopyDir(r.Fremote, r.Flocal, false)
err := CopyDir(r.Fremote, r.Flocal)
fs.Config.DryRun = false
require.NoError(t, err)

@@ -55,7 +54,7 @@ func TestCopy(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
r.Mkdir(r.Fremote)

err := CopyDir(r.Fremote, r.Flocal, false)
err := CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)

fstest.CheckItems(t, r.Flocal, file1)
@@ -72,7 +71,7 @@ func TestCopyNoTraverse(t *testing.T) {

file1 := r.WriteFile("sub dir/hello world", "hello world", t1)

err := CopyDir(r.Fremote, r.Flocal, false)
err := CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)

fstest.CheckItems(t, r.Flocal, file1)
@@ -90,7 +89,7 @@ func TestSyncNoTraverse(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

fstest.CheckItems(t, r.Flocal, file1)
@@ -108,7 +107,7 @@ func TestCopyWithDepth(t *testing.T) {
fs.Config.MaxDepth = 1
defer func() { fs.Config.MaxDepth = -1 }()

err := CopyDir(r.Fremote, r.Flocal, false)
err := CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)

fstest.CheckItems(t, r.Flocal, file1, file2)
@@ -136,7 +135,7 @@ func TestCopyWithFilesFrom(t *testing.T) {
}
defer unpatch()

err = CopyDir(r.Fremote, r.Flocal, false)
err = CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
unpatch()

@@ -153,59 +152,7 @@ func TestCopyEmptyDirectories(t *testing.T) {
require.NoError(t, err)
r.Mkdir(r.Fremote)

err = CopyDir(r.Fremote, r.Flocal, true)
require.NoError(t, err)

fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1,
},
[]string{
"sub dir",
"sub dir2",
},
fs.GetModifyWindow(r.Fremote),
)
}

// Test move empty directories
func TestMoveEmptyDirectories(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
err := operations.Mkdir(r.Flocal, "sub dir2")
require.NoError(t, err)
r.Mkdir(r.Fremote)

err = MoveDir(r.Fremote, r.Flocal, false, true)
require.NoError(t, err)

fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1,
},
[]string{
"sub dir",
"sub dir2",
},
fs.GetModifyWindow(r.Fremote),
)
}

// Test sync empty directories
func TestSyncEmptyDirectories(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
err := operations.Mkdir(r.Flocal, "sub dir2")
require.NoError(t, err)
r.Mkdir(r.Fremote)

err = Sync(r.Fremote, r.Flocal, true)
err = CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)

fstest.CheckListingWithPrecision(
@@ -234,7 +181,7 @@ func TestServerSideCopy(t *testing.T) {
defer finaliseCopy()
t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy)

err = CopyDir(FremoteCopy, r.Fremote, false)
err = CopyDir(FremoteCopy, r.Fremote)
require.NoError(t, err)

fstest.CheckItems(t, FremoteCopy, file1)
@@ -252,7 +199,7 @@ func TestCopyAfterDelete(t *testing.T) {
err := operations.Mkdir(r.Flocal, "")
require.NoError(t, err)

err = CopyDir(r.Fremote, r.Flocal, false)
err = CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)

fstest.CheckItems(t, r.Flocal)
@@ -266,7 +213,7 @@ func TestCopyRedownload(t *testing.T) {
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
fstest.CheckItems(t, r.Fremote, file1)

err := CopyDir(r.Flocal, r.Fremote, false)
err := CopyDir(r.Flocal, r.Fremote)
require.NoError(t, err)

// Test with combined precision of local and remote as we copied it there and back
@@ -286,7 +233,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

// We should have transferred exactly one file.
@@ -298,7 +245,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)

accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

// We should have transferred no files
@@ -320,7 +267,7 @@ func TestSyncSizeOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

// We should have transferred exactly one file.
@@ -332,7 +279,7 @@ func TestSyncSizeOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)

accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

// We should have transferred no files
@@ -354,7 +301,7 @@ func TestSyncIgnoreSize(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

// We should have transferred exactly one file.
@@ -366,7 +313,7 @@ func TestSyncIgnoreSize(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)

accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

// We should have transferred no files
@@ -382,7 +329,7 @@ func TestSyncIgnoreTimes(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

// We should have transferred exactly 0 files because the
@@ -393,7 +340,7 @@ func TestSyncIgnoreTimes(t *testing.T) {
defer func() { fs.Config.IgnoreTimes = false }()

accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

// We should have transferred exactly one file even though the
@@ -413,7 +360,7 @@ func TestSyncIgnoreExisting(t *testing.T) {
defer func() { fs.Config.IgnoreExisting = false }()

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file1)
@@ -421,7 +368,7 @@ func TestSyncIgnoreExisting(t *testing.T) {
// Change everything
r.WriteFile("existing", "newpotatoes", t2)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// Items should not change
fstest.CheckItems(t, r.Fremote, file1)
@@ -469,7 +416,7 @@ func TestSyncIgnoreErrors(t *testing.T) {

accounting.Stats.ResetCounters()
fs.CountError(nil)
assert.NoError(t, Sync(r.Fremote, r.Flocal, false))
assert.NoError(t, Sync(r.Fremote, r.Flocal))

fstest.CheckListingWithPrecision(
t,
@@ -512,7 +459,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
defer func() { fs.Config.DryRun = false }()

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

fstest.CheckItems(t, r.Flocal, file1)
@@ -521,7 +468,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
fs.Config.DryRun = false

accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

fstest.CheckItems(t, r.Flocal, file1)
@@ -549,7 +496,7 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

fstest.CheckItems(t, r.Flocal, file1)
@@ -570,7 +517,7 @@ func TestSyncDoesntUpdateModtime(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

fstest.CheckItems(t, r.Flocal, file1)
@@ -590,7 +537,7 @@ func TestSyncAfterAddingAFile(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1, file2)
fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -605,7 +552,7 @@ func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file2)
@@ -628,7 +575,7 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file2)
@@ -644,7 +591,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {

fs.Config.DryRun = true
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
fs.Config.DryRun = false
require.NoError(t, err)

@@ -663,7 +610,7 @@ func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1, file3)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1, file3)
fstest.CheckItems(t, r.Fremote, file1, file3)
@@ -709,7 +656,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDir(t *testing.T) {
)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

fstest.CheckListingWithPrecision(
@@ -779,7 +726,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {

accounting.Stats.ResetCounters()
fs.CountError(nil)
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
assert.Equal(t, fs.ErrorNotDeleting, err)

fstest.CheckListingWithPrecision(
@@ -856,7 +803,7 @@ func TestCopyDeleteBefore(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)

accounting.Stats.ResetCounters()
err := CopyDir(r.Fremote, r.Flocal, false)
err := CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)

fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -879,14 +826,14 @@ func TestSyncWithExclude(t *testing.T) {
}()

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file2, file1)

// Now sync the other way round and check enormous doesn't get
// deleted as it is excluded from the sync
accounting.Stats.ResetCounters()
err = Sync(r.Flocal, r.Fremote, false)
err = Sync(r.Flocal, r.Fremote)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2, file1, file3)
}
@@ -909,14 +856,14 @@ func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
}()

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file2)

// Check sync the other way round to make sure enormous gets
// deleted even though it is excluded
accounting.Stats.ResetCounters()
err = Sync(r.Flocal, r.Fremote, false)
err = Sync(r.Flocal, r.Fremote)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2)
}
@@ -951,7 +898,7 @@ func TestSyncWithUpdateOlder(t *testing.T) {
}()

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, oneO, twoF, threeO, fourF, fiveF)
}

@@ -975,7 +922,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
f2 := r.WriteFile("yam", "Yam Content", t2)

accounting.Stats.ResetCounters()
require.NoError(t, Sync(r.Fremote, r.Flocal, false))
require.NoError(t, Sync(r.Fremote, r.Flocal))

fstest.CheckItems(t, r.Fremote, f1, f2)
fstest.CheckItems(t, r.Flocal, f1, f2)
@@ -984,7 +931,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
f2 = r.RenameFile(f2, "yaml")

accounting.Stats.ResetCounters()
require.NoError(t, Sync(r.Fremote, r.Flocal, false))
require.NoError(t, Sync(r.Fremote, r.Flocal))

fstest.CheckItems(t, r.Fremote, f1, f2)

@@ -1021,7 +968,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty

// Do server side move
accounting.Stats.ResetCounters()
err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs, false)
err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs)
require.NoError(t, err)

if withFilter {
@@ -1048,7 +995,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty

// Move it back to a new empty remote, dst does not exist this time
accounting.Stats.ResetCounters()
err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs, false)
err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs)
require.NoError(t, err)

if withFilter {
@@ -1073,7 +1020,7 @@ func TestMoveWithDeleteEmptySrcDirs(t *testing.T) {
r.Mkdir(r.Fremote)

// run move with --delete-empty-src-dirs
err := MoveDir(r.Fremote, r.Flocal, true, false)
err := MoveDir(r.Fremote, r.Flocal, true)
require.NoError(t, err)

fstest.CheckListingWithPrecision(
@@ -1093,7 +1040,7 @@ func TestMoveWithoutDeleteEmptySrcDirs(t *testing.T) {
file2 := r.WriteFile("nested/sub dir/file", "nested", t1)
r.Mkdir(r.Fremote)

err := MoveDir(r.Fremote, r.Flocal, false, false)
err := MoveDir(r.Fremote, r.Flocal, false)
require.NoError(t, err)

fstest.CheckListingWithPrecision(
@@ -1154,41 +1101,20 @@ func TestServerSideMoveOverlap(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)

// Subdir move with no filters should return ErrorCantMoveOverlapping
err = MoveDir(FremoteMove, r.Fremote, false, false)
assert.EqualError(t, err, fs.ErrorOverlapping.Error())
err = MoveDir(FremoteMove, r.Fremote, false)
assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())

// Now try with a filter which should also fail with ErrorCantMoveOverlapping
filter.Active.Opt.MinSize = 40
defer func() {
filter.Active.Opt.MinSize = -1
}()
err = MoveDir(FremoteMove, r.Fremote, false, false)
assert.EqualError(t, err, fs.ErrorOverlapping.Error())
}

// Test a sync with overlap
func TestSyncOverlap(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

subRemoteName := r.FremoteName + "/rclone-sync-test"
FremoteSync, err := fs.NewFs(subRemoteName)
require.NoError(t, err)

checkErr := func(err error) {
require.Error(t, err)
assert.True(t, fserrors.IsFatalError(err))
assert.Equal(t, fs.ErrorOverlapping.Error(), err.Error())
}

checkErr(Sync(FremoteSync, r.Fremote, false))
checkErr(Sync(r.Fremote, FremoteSync, false))
checkErr(Sync(r.Fremote, r.Fremote, false))
checkErr(Sync(FremoteSync, FremoteSync, false))
err = MoveDir(FremoteMove, r.Fremote, false)
assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())
}

// Test with BackupDir set
func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
func testSyncBackupDir(t *testing.T, suffix string) {
r := fstest.NewRun(t)
defer r.Finalise()

@@ -1199,18 +1125,16 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {

fs.Config.BackupDir = r.FremoteName + "/backup"
fs.Config.Suffix = suffix
fs.Config.SuffixKeepExtension = suffixKeepExtension
defer func() {
fs.Config.BackupDir = ""
fs.Config.Suffix = ""
fs.Config.SuffixKeepExtension = false
}()

// Make the setup so we have one, two, three in the dest
// and one (different), two (same) in the source
file1 := r.WriteObject("dst/one", "one", t1)
file2 := r.WriteObject("dst/two", "two", t1)
file3 := r.WriteObject("dst/three.txt", "three", t1)
file3 := r.WriteObject("dst/three", "three", t1)
file2a := r.WriteFile("two", "two", t1)
file1a := r.WriteFile("one", "oneA", t2)

@@ -1221,7 +1145,7 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
require.NoError(t, err)

accounting.Stats.ResetCounters()
err = Sync(fdst, r.Flocal, false)
err = Sync(fdst, r.Flocal)
require.NoError(t, err)

// one should be moved to the backup dir and the new one installed
@@ -1229,24 +1153,20 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
file1a.Path = "dst/one"
// two should be unchanged
// three should be moved to the backup dir
if suffixKeepExtension {
file3.Path = "backup/three" + suffix + ".txt"
} else {
file3.Path = "backup/three.txt" + suffix
}
file3.Path = "backup/three" + suffix

fstest.CheckItems(t, r.Fremote, file1, file2, file3, file1a)

// Now check what happens if we do it again
// Restore a different three and update one in the source
file3a := r.WriteObject("dst/three.txt", "threeA", t2)
file3a := r.WriteObject("dst/three", "threeA", t2)
file1b := r.WriteFile("one", "oneBB", t3)
fstest.CheckItems(t, r.Fremote, file1, file2, file3, file1a, file3a)

// This should delete three and overwrite one again, checking
// the files got overwritten correctly in backup-dir
accounting.Stats.ResetCounters()
err = Sync(fdst, r.Flocal, false)
err = Sync(fdst, r.Flocal)
require.NoError(t, err)

// one should be moved to the backup dir and the new one installed
@@ -1254,17 +1174,12 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
file1b.Path = "dst/one"
// two should be unchanged
// three should be moved to the backup dir
if suffixKeepExtension {
file3a.Path = "backup/three" + suffix + ".txt"
} else {
file3a.Path = "backup/three.txt" + suffix
}
file3a.Path = "backup/three" + suffix

fstest.CheckItems(t, r.Fremote, file1b, file2, file3a, file1a)
}
func TestSyncBackupDir(t *testing.T) { testSyncBackupDir(t, "", false) }
func TestSyncBackupDirWithSuffix(t *testing.T) { testSyncBackupDir(t, ".bak", false) }
func TestSyncBackupDirWithSuffixKeepExtension(t *testing.T) { testSyncBackupDir(t, "-2019-01-01", true) }
func TestSyncBackupDir(t *testing.T) { testSyncBackupDir(t, "") }
func TestSyncBackupDirWithSuffix(t *testing.T) { testSyncBackupDir(t, ".bak") }

// Check we can sync two files with differing UTF-8 representations
func TestSyncUTFNorm(t *testing.T) {
@@ -1288,7 +1203,7 @@ func TestSyncUTFNorm(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2)

accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)

// We should have transferred exactly one file, but kept the
@@ -1314,7 +1229,7 @@ func TestSyncImmutable(t *testing.T) {

// Should succeed
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file1)
@@ -1326,7 +1241,7 @@ func TestSyncImmutable(t *testing.T) {

// Should fail with ErrorImmutableModified and not modify local or remote files
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
assert.EqualError(t, err, fs.ErrorImmutableModified.Error())
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file1)
@@ -1362,6 +1277,6 @@ func TestAbort(t *testing.T) {

accounting.Stats.ResetCounters()

err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
assert.Equal(t, accounting.ErrorMaxTransferLimitReached, err)
}
262
fs/walk/walk.go
262
fs/walk/walk.go
@@ -51,7 +51,7 @@ type Func func(path string, entries fs.DirEntries, err error) error
//
// Parent directories are always listed before their children
//
// This is implemented by WalkR if Config.UseUseListR is true
// This is implemented by WalkR if Config.UseRecursiveListing is true
// and f supports it and level > 1, or WalkN otherwise.
//
// If --files-from is set then a DirTree will be constructed with just
@@ -62,265 +62,12 @@ func Walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
if filter.Active.HaveFilesFrom() {
return walkR(f, path, includeAll, maxLevel, fn, filter.Active.MakeListR(f.NewObject))
}
// FIXME should this just be maxLevel < 0 - why the maxLevel > 1
if (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && f.Features().ListR != nil {
return walkListR(f, path, includeAll, maxLevel, fn)
}
return walkListDirSorted(f, path, includeAll, maxLevel, fn)
}

// ListType is used to choose which combination of files or directories is required
type ListType byte

// Types of listing for ListR
const (
ListObjects ListType = 1 << iota // list objects only
ListDirs // list dirs only
ListAll = ListObjects | ListDirs // list files and dirs
)

// Objects returns true if the list type specifies objects
func (l ListType) Objects() bool {
return (l & ListObjects) != 0
}

// Dirs returns true if the list type specifies dirs
func (l ListType) Dirs() bool {
return (l & ListDirs) != 0
}

// Filter in (in place) to only contain the type of list entry required
func (l ListType) Filter(in *fs.DirEntries) {
if l == ListAll {
return
}
out := (*in)[:0]
for _, entry := range *in {
switch entry.(type) {
case fs.Object:
if l.Objects() {
out = append(out, entry)
}
case fs.Directory:
if l.Dirs() {
out = append(out, entry)
}
default:
fs.Errorf(nil, "Unknown object type %T", entry)
}
}
*in = out
}
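
ListType in action, a small sketch using the mock helpers the tests below rely on:

entries := fs.DirEntries{
	mockobject.Object("a"),
	mockdir.New("dir"),
}
ListDirs.Filter(&entries) // entries now contains only the directory "dir"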

// ListR lists the directory recursively.
//
// If includeAll is not set it will use the filters defined.
//
// If maxLevel is < 0 then it will recurse indefinitely, else it will
// only do maxLevel levels.
//
// If synthesizeDirs is set then for bucket based remotes it will
// synthesize directories from the file structure. This uses extra
// memory so don't set this if you don't need directories, likewise do
// set this if you are interested in directories.
//
// It calls fn for each tranche of DirEntries read. Note that these
// don't necessarily represent a directory
//
// Note that fn will not be called concurrently whereas the directory
// listing will proceed concurrently.
//
// Directories are not listed in any particular order so you can't
// rely on parents coming before children or alphabetical ordering
//
// This is implemented by using ListR on the backend if possible and
// efficient, otherwise by Walk.
//
// NB (f, path) to be replaced by fs.Dir at some point
func ListR(f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error {
// FIXME disable this with --no-fast-list ??? `--disable ListR` will do it...
doListR := f.Features().ListR

// Can't use ListR if...
if doListR == nil || // ...no ListR
filter.Active.HaveFilesFrom() || // ...using --files-from
maxLevel >= 0 || // ...using bounded recursion
len(filter.Active.Opt.ExcludeFile) > 0 || // ...using --exclude-file
filter.Active.BoundedRecursion() { // ...filters imply bounded recursion
return listRwalk(f, path, includeAll, maxLevel, listType, fn)
}
return listR(f, path, includeAll, listType, fn, doListR, listType.Dirs() && f.Features().BucketBased)
}
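
A caller's view of ListR, mirroring the fstest usage later in this diff (f is any fs.Fs; note the callback is invoked once per tranche, not per directory):

err := ListR(f, "", true, -1, ListAll, func(entries fs.DirEntries) error {
	for _, entry := range entries {
		fmt.Println(entry.Remote())
	}
	return nil
})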

// listRwalk walks the file tree for ListR using Walk
func listRwalk(f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error {
var listErr error
walkErr := Walk(f, path, includeAll, maxLevel, func(path string, entries fs.DirEntries, err error) error {
// Carry on listing but return the error at the end
if err != nil {
listErr = err
fs.CountError(err)
fs.Errorf(path, "error listing: %v", err)
return nil
}
listType.Filter(&entries)
return fn(entries)
})
if listErr != nil {
return listErr
}
return walkErr
}

// dirMap keeps track of directories made for bucket based remotes.
// true => directory has been sent
// false => directory has been seen but not sent
type dirMap struct {
mu sync.Mutex
m map[string]bool
root string
}

// make a new dirMap
func newDirMap(root string) *dirMap {
return &dirMap{
m: make(map[string]bool),
root: root,
}
}

// add adds a directory and parents with sent
func (dm *dirMap) add(dir string, sent bool) {
for {
if dir == dm.root || dir == "" {
return
}
currentSent, found := dm.m[dir]
if found {
// If it has been sent already then nothing more to do
if currentSent {
return
}
// If not sent already don't override
if !sent {
return
}
// currentSent == false && sent == true so needs overriding
}
dm.m[dir] = sent
// Add parents in as unsent
dir = parentDir(dir)
sent = false
}
}

// add all the directories in entries and their parents to the dirMap
func (dm *dirMap) addEntries(entries fs.DirEntries) error {
dm.mu.Lock()
defer dm.mu.Unlock()
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
dm.add(parentDir(x.Remote()), false)
case fs.Directory:
dm.add(x.Remote(), true)
default:
return errors.Errorf("unknown object type %T", entry)
}
}
return nil
}

// send any missing parents to fn
func (dm *dirMap) sendEntries(fn fs.ListRCallback) (err error) {
// Count the strings first so we allocate the minimum memory
n := 0
for _, sent := range dm.m {
if !sent {
n++
}
}
if n == 0 {
return nil
}
dirs := make([]string, 0, n)
// Fill the dirs up then sort it
for dir, sent := range dm.m {
if !sent {
dirs = append(dirs, dir)
}
}
sort.Strings(dirs)
// Now convert to bulkier Dir in batches and send
now := time.Now()
list := NewListRHelper(fn)
for _, dir := range dirs {
err = list.Add(fs.NewDir(dir, now))
if err != nil {
return err
}
}
return list.Flush()
}
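
How the dirMap pieces fit together for a bucket based remote, as listR below and the tests in walk_test.go exercise: record everything the backend returned, then synthesize the parent directories it never listed:

dm := newDirMap("")
if err := dm.addEntries(entries); err != nil {
	return err
}
// Emits fs.NewDir entries, sorted, for any directory that was seen
// as a parent but never sent by the backend.
if err := dm.sendEntries(fn); err != nil {
	return err
}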

// listR walks the file tree using ListR
func listR(f fs.Fs, path string, includeAll bool, listType ListType, fn fs.ListRCallback, doListR fs.ListRFn, synthesizeDirs bool) error {
includeDirectory := filter.Active.IncludeDirectory(f)
if !includeAll {
includeAll = filter.Active.InActive()
}
var dm *dirMap
if synthesizeDirs {
dm = newDirMap(path)
}
var mu sync.Mutex
err := doListR(path, func(entries fs.DirEntries) (err error) {
if synthesizeDirs {
err = dm.addEntries(entries)
if err != nil {
return err
}
}
listType.Filter(&entries)
if !includeAll {
filteredEntries := entries[:0]
for _, entry := range entries {
var include bool
switch x := entry.(type) {
case fs.Object:
include = filter.Active.IncludeObject(x)
case fs.Directory:
include, err = includeDirectory(x.Remote())
if err != nil {
return err
}
default:
return errors.Errorf("unknown object type %T", entry)
}
if include {
filteredEntries = append(filteredEntries, entry)
} else {
fs.Debugf(entry, "Excluded from sync (and deletion)")
}
}
entries = filteredEntries
}
mu.Lock()
defer mu.Unlock()
return fn(entries)
})
if err != nil {
return err
}
if synthesizeDirs {
err = dm.sendEntries(fn)
if err != nil {
return err
}
}
return nil
}

// walkListDirSorted lists the directory.
//
// It implements Walk using non recursive directory listing.
@@ -759,9 +506,12 @@ func walkR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR f
return nil
}

// GetAll runs ListR getting all the results
// GetAll runs Walk getting all the results
func GetAll(f fs.Fs, path string, includeAll bool, maxLevel int) (objs []fs.Object, dirs []fs.Directory, err error) {
err = ListR(f, path, includeAll, maxLevel, ListAll, func(entries fs.DirEntries) error {
err = Walk(f, path, includeAll, maxLevel, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:

@@ -2,15 +2,12 @@ package walk

import (
"fmt"
"io"
"strings"
"sync"
"testing"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fstest/mockdir"
"github.com/ncw/rclone/fstest/mockfs"
"github.com/ncw/rclone/fstest/mockobject"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
@@ -637,307 +634,3 @@ b/c/d/
// Set to default value, to avoid side effects
filter.Active.Opt.ExcludeFile = ""
}

func TestListType(t *testing.T) {
assert.Equal(t, true, ListObjects.Objects())
assert.Equal(t, false, ListObjects.Dirs())
assert.Equal(t, false, ListDirs.Objects())
assert.Equal(t, true, ListDirs.Dirs())
assert.Equal(t, true, ListAll.Objects())
assert.Equal(t, true, ListAll.Dirs())

var (
a = mockobject.Object("a")
b = mockobject.Object("b")
dir = mockdir.New("dir")
adir = mockobject.Object("dir/a")
dir2 = mockdir.New("dir2")
origEntries = fs.DirEntries{
a, b, dir, adir, dir2,
}
dirEntries = fs.DirEntries{
dir, dir2,
}
objEntries = fs.DirEntries{
a, b, adir,
}
)
copyOrigEntries := func() (out fs.DirEntries) {
out = make(fs.DirEntries, len(origEntries))
copy(out, origEntries)
return out
}

got := copyOrigEntries()
ListAll.Filter(&got)
assert.Equal(t, origEntries, got)

got = copyOrigEntries()
ListObjects.Filter(&got)
assert.Equal(t, objEntries, got)

got = copyOrigEntries()
ListDirs.Filter(&got)
assert.Equal(t, dirEntries, got)
}

func TestListR(t *testing.T) {
objects := fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b"),
mockdir.New("dir"),
mockobject.Object("dir/a"),
mockobject.Object("dir/b"),
mockobject.Object("dir/c"),
}
f := mockfs.NewFs("mock", "/")
var got []string
clearCallback := func() {
got = nil
}
callback := func(entries fs.DirEntries) error {
for _, entry := range entries {
got = append(got, entry.Remote())
}
return nil
}
doListR := func(dir string, callback fs.ListRCallback) error {
var os fs.DirEntries
for _, o := range objects {
if dir == "" || strings.HasPrefix(o.Remote(), dir+"/") {
os = append(os, o)
}
}
return callback(os)
}

// Setup filter
oldFilter := filter.Active
defer func() {
filter.Active = oldFilter
}()

var err error
filter.Active, err = filter.NewFilter(nil)
require.NoError(t, err)
require.NoError(t, filter.Active.AddRule("+ b"))
require.NoError(t, filter.Active.AddRule("- *"))

// Base case
clearCallback()
err = listR(f, "", true, ListAll, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"a", "b", "dir", "dir/a", "dir/b", "dir/c"}, got)

// Base case - with Objects
clearCallback()
err = listR(f, "", true, ListObjects, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"a", "b", "dir/a", "dir/b", "dir/c"}, got)

// Base case - with Dirs
clearCallback()
err = listR(f, "", true, ListDirs, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"dir"}, got)

// With filter
clearCallback()
err = listR(f, "", false, ListAll, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"b", "dir", "dir/b"}, got)

// With filter - with Objects
clearCallback()
err = listR(f, "", false, ListObjects, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"b", "dir/b"}, got)

// With filter - with Dir
clearCallback()
err = listR(f, "", false, ListDirs, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"dir"}, got)

// With filter and subdir
clearCallback()
err = listR(f, "dir", false, ListAll, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"dir/b"}, got)

// Now bucket based
objects = fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b"),
mockobject.Object("dir/a"),
mockobject.Object("dir/b"),
mockobject.Object("dir/subdir/c"),
mockdir.New("dir/subdir"),
}

// Base case
clearCallback()
err = listR(f, "", true, ListAll, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"a", "b", "dir/a", "dir/b", "dir/subdir/c", "dir/subdir", "dir"}, got)

// With filter
clearCallback()
err = listR(f, "", false, ListAll, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"b", "dir/b", "dir/subdir", "dir"}, got)

// With filter and subdir
clearCallback()
err = listR(f, "dir", false, ListAll, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"dir/b", "dir/subdir"}, got)

// With filter and subdir - with Objects
clearCallback()
err = listR(f, "dir", false, ListObjects, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"dir/b"}, got)

// With filter and subdir - with Dirs
clearCallback()
err = listR(f, "dir", false, ListDirs, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"dir/subdir"}, got)
}

func TestDirMapAdd(t *testing.T) {
type add struct {
dir string
sent bool
}
for i, test := range []struct {
root string
in []add
want map[string]bool
}{
{
root: "",
in: []add{
{"", true},
},
want: map[string]bool{},
},
{
root: "",
in: []add{
{"a/b/c", true},
},
want: map[string]bool{
"a/b/c": true,
"a/b": false,
"a": false,
},
},
{
root: "",
in: []add{
{"a/b/c", true},
{"a/b", true},
},
want: map[string]bool{
"a/b/c": true,
"a/b": true,
"a": false,
},
},
{
root: "",
in: []add{
{"a/b", true},
{"a/b/c", false},
},
want: map[string]bool{
"a/b/c": false,
"a/b": true,
"a": false,
},
},
{
root: "root",
in: []add{
{"root/a/b", true},
{"root/a/b/c", false},
},
want: map[string]bool{
"root/a/b/c": false,
"root/a/b": true,
"root/a": false,
},
},
} {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
dm := newDirMap(test.root)
for _, item := range test.in {
dm.add(item.dir, item.sent)
}
assert.Equal(t, test.want, dm.m)
})
}
}

func TestDirMapAddEntries(t *testing.T) {
dm := newDirMap("")
entries := fs.DirEntries{
mockobject.Object("dir/a"),
mockobject.Object("dir/b"),
mockdir.New("dir"),
mockobject.Object("dir2/a"),
mockobject.Object("dir2/b"),
}
require.NoError(t, dm.addEntries(entries))
assert.Equal(t, map[string]bool{"dir": true, "dir2": false}, dm.m)
}

func TestDirMapSendEntries(t *testing.T) {
var got []string
clearCallback := func() {
got = nil
}
callback := func(entries fs.DirEntries) error {
for _, entry := range entries {
got = append(got, entry.Remote())
}
return nil
}

// general test
dm := newDirMap("")
entries := fs.DirEntries{
mockobject.Object("dir/a"),
mockobject.Object("dir/b"),
mockdir.New("dir"),
mockobject.Object("dir2/a"),
mockobject.Object("dir2/b"),
mockobject.Object("dir1/a"),
mockobject.Object("dir3/b"),
}
require.NoError(t, dm.addEntries(entries))
clearCallback()
err := dm.sendEntries(callback)
require.NoError(t, err)
assert.Equal(t, []string{
"dir1",
"dir2",
"dir3",
}, got)

// return error from callback
callback2 := func(entries fs.DirEntries) error {
return io.EOF
}
err = dm.sendEntries(callback2)
require.Equal(t, io.EOF, err)

// empty
dm = newDirMap("")
clearCallback()
err = dm.sendEntries(callback)
require.NoError(t, err)
assert.Equal(t, []string(nil), got)
}
|
||||
|
||||
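The TestDirMapAdd cases above pin down the semantics of dirMap.add: a directory is recorded with its sent flag, and every parent up to (but not including) the root is recorded as unsent, without downgrading a parent that was already marked sent. A minimal self-contained sketch reproducing exactly those cases follows; it is a toy model written for this document, not the walk package's actual implementation.

package main

import (
	"fmt"
	"strings"
)

// dirMap is a toy model of the structure exercised by TestDirMapAdd.
type dirMap struct {
	root string
	m    map[string]bool // directory name -> whether it has been sent
}

func newDirMap(root string) *dirMap {
	return &dirMap{root: root, m: map[string]bool{}}
}

// parentDir returns the parent of dir, or "" for a top level directory.
func parentDir(dir string) string {
	if i := strings.LastIndex(dir, "/"); i >= 0 {
		return dir[:i]
	}
	return ""
}

// add records dir with its sent flag, then walks up recording each
// parent as unsent, stopping at the root or at a parent whose
// recorded state would not change.
func (dm *dirMap) add(dir string, sent bool) {
	for dir != dm.root && dir != "" {
		if prevSent, found := dm.m[dir]; found && (prevSent || !sent) {
			break
		}
		dm.m[dir] = sent
		dir = parentDir(dir)
		sent = false
	}
}

func main() {
	dm := newDirMap("")
	dm.add("a/b/c", true)
	fmt.Println(dm.m) // map[a:false a/b:false a/b/c:true]
}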
@@ -468,8 +468,11 @@ func Purge(f fs.Fs) {
	}
	if doFallbackPurge {
		dirs := []string{""}
		err = walk.ListR(f, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error {
		var err error
		err = walk.Walk(f, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
			if err != nil {
				log.Printf("purge walk returned error: %v", err)
				return nil
			}
			entries.ForObject(func(obj fs.Object) {
				fs.Debugf(f, "Purge object %q", obj.Remote())
				err = obj.Remove()

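The hunk above swaps the recursive lister for walk.Walk, whose callback receives the directory path, the entries in it, and any listing error. A sketch of the full fallback-purge pattern under that signature (f is assumed to be an fs.Fs from the surrounding code; ForObject and ForDir are the fs.DirEntries helpers):

	dirs := []string{""}
	err := walk.Walk(f, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
		if err != nil {
			log.Printf("purge walk returned error: %v", err)
			return nil // keep walking past listing errors
		}
		entries.ForObject(func(obj fs.Object) {
			_ = obj.Remove() // best-effort object delete
		})
		entries.ForDir(func(dir fs.Directory) {
			dirs = append(dirs, dir.Remote()) // remember dirs for removal afterwards
		})
		return nil
	})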
@@ -782,39 +782,6 @@ func Run(t *testing.T, opt *Opt) {
		TestFsListDirFile2(t)
	})

	// Test the files are all there with walk.ListR recursive listings
	t.Run("FsListR", func(t *testing.T) {
		skipIfNotOk(t)
		objs, dirs, err := walk.GetAll(remote, "", true, -1)
		require.NoError(t, err)
		assert.Equal(t, []string{
			"hello_ sausage",
			"hello_ sausage/êé",
			"hello_ sausage/êé/Hello, 世界",
			"hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _ + ≠",
		}, dirsToNames(dirs))
		assert.Equal(t, []string{
			"file name.txt",
			"hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _ + ≠/z.txt",
		}, objsToNames(objs))
	})

	// Test the files are all there with
	// walk.ListR recursive listings on a sub dir
	t.Run("FsListRSubdir", func(t *testing.T) {
		skipIfNotOk(t)
		objs, dirs, err := walk.GetAll(remote, path.Dir(path.Dir(path.Dir(path.Dir(file2.Path)))), true, -1)
		require.NoError(t, err)
		assert.Equal(t, []string{
			"hello_ sausage/êé",
			"hello_ sausage/êé/Hello, 世界",
			"hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _ + ≠",
		}, dirsToNames(dirs))
		assert.Equal(t, []string{
			"hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _ + ≠/z.txt",
		}, objsToNames(objs))
	})

	// TestFsListDirRoot tests that DirList works in the root
	TestFsListDirRoot := func(t *testing.T) {
		skipIfNotOk(t)

@@ -139,7 +139,13 @@ func newRunIndividual(t *testing.T, individual bool) *Run {
		*r = *oneRun
		r.cleanRemote = func() {
			var toDelete []string
			err := walk.ListR(r.Fremote, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error {
			err := walk.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
				if err != nil {
					if err == fs.ErrorDirNotFound {
						return nil
					}
					t.Fatalf("Error listing: %v", err)
				}
				for _, entry := range entries {
					switch x := entry.(type) {
					case fs.Object:

@@ -8,7 +8,6 @@ tests:
 - path: fs/sync
   subdir: true
   fastlist: true
 - path: vfs
backends:
 # - backend: "amazonclouddrive"
 #   remote: "TestAmazonCloudDrive:"
@@ -139,7 +138,3 @@ backends:
   remote: "TestUnion:"
   subdir: false
   fastlist: false
 - backend: "koofr"
   remote: "TestKoofr:"
   subdir: false
   fastlist: false

13
go.mod
@@ -17,18 +17,16 @@ require (
	github.com/djherbis/times v1.2.0
	github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible
	github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 // indirect
	github.com/goftp/server v0.0.0-20190304020633-eabccc535b5a
	github.com/goftp/server v0.0.0-20190111142836-88de73f463af
	github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
	github.com/jlaffaye/ftp v0.0.0-20190126081051-8019e6774408
	github.com/jtolds/gls v4.2.1+incompatible // indirect
	github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect
	github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a
	github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff
	github.com/kr/fs v0.1.0 // indirect
	github.com/mattn/go-runewidth v0.0.4
	github.com/mattn/go-runewidth v0.0.4 // indirect
	github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
	github.com/ncw/swift v1.0.46
	github.com/ncw/swift v1.0.44
	github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d
	github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
	github.com/patrickmn/go-cache v2.1.0+incompatible
@@ -44,6 +42,7 @@ require (
	github.com/spf13/pflag v1.0.3
	github.com/stretchr/testify v1.3.0
	github.com/t3rm1n4l/go-mega v0.0.0-20190205172012-55a226cf41da
	github.com/thinkhy/go-adb v0.0.0-20190123053734-b4b48de70418
	github.com/xanzy/ssh-agent v0.2.0
	github.com/yunify/qingstor-sdk-go v2.2.15+incompatible
	go.etcd.io/bbolt v1.3.2 // indirect
@@ -51,10 +50,12 @@ require (
	golang.org/x/net v0.0.0-20190206173232-65e2d4e15006
	golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1
	golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
	golang.org/x/sys v0.0.0-20190204203706-41f3e6584952
	golang.org/x/sys v0.0.0-20190213121743-983097b1a8a3
	golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2
	golang.org/x/time v0.0.0-20181108054448-85acf8d2951c
	google.golang.org/api v0.1.0
	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
	gopkg.in/yaml.v2 v2.2.2
)

replace github.com/thinkhy/go-adb v0.0.0-20190123053734-b4b48de70418 => ../../../github.com/thinkhy/go-adb

31
go.sum
@@ -23,6 +23,9 @@ github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4 h1:mK1/QgFPU4osbhjJ26B1w7
github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5 h1:lmyFvZXNGOmsKCYXNwzDLWafnxeewxsFwdsvTvSC1sg=
github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5/go.mod h1:DGqLjaZ3ziKKNRt+U5Q9PLWJ52Q/4rxfaaH/b3QYKaE=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
@@ -32,6 +35,7 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/billziss-gh/cgofuse v1.1.0 h1:tATn9ZDvuPcOVlvR4tJitGHgAqy1y18+4mKmRfdfjec=
github.com/billziss-gh/cgofuse v1.1.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/cheggaaa/pb v2.0.6+incompatible/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
@@ -54,8 +58,6 @@ github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 h1:cC0Hbb+18DJ4i
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9/go.mod h1:GpOj6zuVBG3Inr9qjEnuVTgBlk2lZ1S9DcoFiXWyKss=
github.com/goftp/server v0.0.0-20190111142836-88de73f463af h1:PJxb1aA1z+Ohy2j28L92+ng9phXpZVFRFbPkfmJcRGo=
github.com/goftp/server v0.0.0-20190111142836-88de73f463af/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
github.com/goftp/server v0.0.0-20190304020633-eabccc535b5a h1:XTJuuzIub3zu2FgPqdFM9XFYYisXWu2hN/rFwayAIcY=
github.com/goftp/server v0.0.0-20190304020633-eabccc535b5a/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
@@ -77,6 +79,10 @@ github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4=
github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
@@ -89,10 +95,6 @@ github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVY
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a h1:W+gnfphB7WpRj0rbTF40e3edULfri4fou2kUFw6AF3A=
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a/go.mod h1:3xszwh+rNrYk1r9SStc4iJ326gne1OaBcrdB1ACsbzI=
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff h1:GlfzG8bgyoJYz+5sMvGpYnHrg4veNVNnDGuE9hTEMHk=
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff/go.mod h1:MRAz4Gsxd+OzrZ0owwrUHc0zLESL+1Y5syqK/sJxK2A=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -101,6 +103,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -109,8 +113,6 @@ github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zX
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
github.com/ncw/swift v1.0.44 h1:EKvOTvUxElbpDWqxsyVaVGvc2IfuOqQnRmjnR2AGhQ4=
github.com/ncw/swift v1.0.44/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/ncw/swift v1.0.46 h1:ewnoFKEI9f2LT+gqmeeiJ1SCzOBDTcK3JF1XziR85QQ=
github.com/ncw/swift v1.0.46/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840=
@@ -177,6 +179,7 @@ github.com/spf13/cobra v0.0.4-0.20190109003409-7547e83b2d85/go.mod h1:1l0Ry5zgKv
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
@@ -186,6 +189,8 @@ github.com/t3rm1n4l/go-mega v0.0.0-20190205172012-55a226cf41da/go.mod h1:XWL4vDy
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
github.com/yosemite-open/go-adb v0.0.0-20181206003817-d40962019194 h1:hQ7oP/X/5JR3gGKEEEZU3uihkDePXlFoTwr0XDu5CKg=
github.com/yosemite-open/go-adb v0.0.0-20181206003817-d40962019194/go.mod h1:OoY1zUwKq/hv/6hBuQxzSRNu1XZ289eXaDNgoHa+3lU=
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible h1:/Z0q3/eSMoPYAuRmhjWtuGSmVVciFC6hfm3yfCKuvz0=
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible/go.mod h1:w6wqLDQ5bBTzxGJ55581UrSwLrsTAsdo9N6yX/8d9RY=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
@@ -225,6 +230,10 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952 h1:FDfvYgoVsA7TTZSbgiqjAbfPbK47CNHdWl3h/PJtii0=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190213121743-983097b1a8a3 h1:+KlxhGbYkFs8lMfwKn+2ojry1ID5eBSMXprS2u/wqCE=
golang.org/x/sys v0.0.0-20190213121743-983097b1a8a3/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
@@ -252,12 +261,18 @@ google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
gopkg.in/VividCortex/ewma.v1 v1.1.1/go.mod h1:TekXuFipeiHWiAlO1+wSS23vTcyFau5u3rxXUSXj710=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v2 v2.0.6/go.mod h1:0CiZ1p8pvtxBlQpLXkHuUTpdJ1shm3OqCF1QugkjHL4=
gopkg.in/fatih/color.v1 v1.7.0/go.mod h1:P7yosIhqIl/sX8J8UypY5M+dDpD2KmyfP5IRs5v/fo0=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/mattn/go-colorable.v0 v0.1.0/go.mod h1:BVJlBXzARQxdi3nZo6f6bnl5yR20/tOL6p+V0KejgSY=
gopkg.in/mattn/go-isatty.v0 v0.0.4/go.mod h1:wt691ab7g0X4ilKZNmMII3egK0bTxl37fEn/Fwbd8gc=
gopkg.in/mattn/go-runewidth.v0 v0.0.4/go.mod h1:BmXejnxvhwdaATwiJbB1vZ2dtXkQKZGu9yLFCZb4msQ=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=

@@ -13,31 +13,23 @@ import (
)

var (
	fns      = make(map[FnHandle]bool)
	fnsMutex sync.Mutex
	fns          []func()
	exitChan     chan os.Signal
	exitOnce     sync.Once
	registerOnce sync.Once
)

// FnHandle is the type of the handle returned by function `Register`
// that can be used to unregister an at-exit function
type FnHandle *func()

// Register a function to be called on exit.
// Returns a handle which can be used to unregister the function with `Unregister`.
func Register(fn func()) FnHandle {
	fnsMutex.Lock()
	fns[&fn] = true
	fnsMutex.Unlock()

	// Run AtExit handlers on exitSignals so everything gets tidied up properly
// Register a function to be called on exit
func Register(fn func()) {
	fns = append(fns, fn)
	// Run AtExit handlers on SIGINT or SIGTERM so everything gets
	// tidied up properly
	registerOnce.Do(func() {
		exitChan = make(chan os.Signal, 1)
		signal.Notify(exitChan, exitSignals...)
		signal.Notify(exitChan, os.Interrupt) // syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT
		go func() {
			sig := <-exitChan
			if sig == nil {
			sig, ok := <-exitChan
			if !ok || sig == nil {
				return
			}
			fs.Infof(nil, "Signal received: %s", sig)
@@ -46,15 +38,6 @@ func Register(fn func()) FnHandle {
			os.Exit(0)
		}()
	})

	return &fn
}

// Unregister a function using the handle returned by `Register`
func Unregister(handle FnHandle) {
	fnsMutex.Lock()
	defer fnsMutex.Unlock()
	delete(fns, handle)
}

// IgnoreSignals disables the signal handler and prevents Run from being executed automatically
@@ -70,10 +53,8 @@ func IgnoreSignals() {
// Run all the at exit functions if they haven't been run already
func Run() {
	exitOnce.Do(func() {
		fnsMutex.Lock()
		defer fnsMutex.Unlock()
		for fnHandle := range fns {
			(*fnHandle)()
		for _, fn := range fns {
			fn()
		}
	})
}

@@ -1,9 +0,0 @@
//+build windows plan9

package atexit

import (
	"os"
)

var exitSignals = []os.Signal{os.Interrupt}
@@ -1,10 +0,0 @@
//+build !windows,!plan9

package atexit

import (
	"os"
	"syscall"
)

var exitSignals = []os.Signal{syscall.SIGINT, syscall.SIGTERM} // Not syscall.SIGQUIT as we want the default behaviour
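Taken together, the handle-based side of this diff gives the API: Register returns a FnHandle, Unregister withdraws it, and Run fires whatever remains exactly once. A minimal usage sketch under that API (the cleanup itself is made up for illustration):

	// Register a cleanup and keep the handle so it can be withdrawn.
	handle := atexit.Register(func() {
		fmt.Println("removing temporary files") // illustrative cleanup
	})

	// If the work completes normally, the cleanup may no longer be wanted.
	atexit.Unregister(handle)

	// On a deliberate exit path, run any remaining handlers exactly once.
	atexit.Run()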
@@ -1,76 +0,0 @@
package errors

import (
	"errors"
	"fmt"
	"reflect"
)

// New returns an error that formats as the given text.
func New(text string) error {
	return errors.New(text)
}

// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
func Errorf(format string, a ...interface{}) error {
	return fmt.Errorf(format, a...)
}

// WalkFunc is the signature of the Walk callback function. The function gets the
// current error in the chain and should return true if the chain processing
// should be aborted.
type WalkFunc func(error) bool

// Walk invokes the given function for each error in the chain. If the
// provided function returns true or no further cause can be found, the process
// is stopped and no further calls will be made.
//
// The next error in the chain is determined by the following rules:
//   - If the current error has a `Cause() error` method (github.com/pkg/errors),
//     the return value of this method is used.
//   - If the current error has an `Unwrap() error` method (golang.org/x/xerrors),
//     the return value of this method is used.
//   - Common errors in the Go runtime that contain an Err field will use this value.
func Walk(err error, f WalkFunc) {
	for prev := err; err != nil; prev = err {
		if f(err) {
			return
		}

		switch e := err.(type) {
		case causer:
			err = e.Cause()
		case wrapper:
			err = e.Unwrap()
		default:
			// Unpack any struct or *struct with a field of name Err which satisfies
			// the error interface. This includes *url.Error, *net.OpError,
			// *os.SyscallError and many others in the stdlib.
			errType := reflect.TypeOf(err)
			errValue := reflect.ValueOf(err)
			if errValue.IsValid() && errType.Kind() == reflect.Ptr {
				errType = errType.Elem()
				errValue = errValue.Elem()
			}
			if errValue.IsValid() && errType.Kind() == reflect.Struct {
				if errField := errValue.FieldByName("Err"); errField.IsValid() {
					errFieldValue := errField.Interface()
					if newErr, ok := errFieldValue.(error); ok {
						err = newErr
					}
				}
			}
		}
		if err == prev {
			break
		}
	}
}

type causer interface {
	Cause() error
}
type wrapper interface {
	Unwrap() error
}
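A short usage sketch of Walk on a stdlib error that carries an Err field (the *url.Error case named in the comment above); the wrapping is hypothetical and just for illustration:

	// Walk visits the *url.Error first, then follows its Err field to io.EOF.
	err := &url.Error{Op: "Get", URL: "https://example.com", Err: io.EOF}
	errors.Walk(err, func(e error) bool {
		fmt.Printf("%T: %v\n", e, e)
		return e == io.EOF // stop once the root cause is reached
	})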
@@ -1,90 +0,0 @@
package errors_test

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/ncw/rclone/lib/errors"
)

func TestWalk(t *testing.T) {
	origin := errors.New("origin")

	for _, test := range []struct {
		err   error
		calls int
		last  error
	}{
		{causerError{nil}, 1, causerError{nil}},
		{wrapperError{nil}, 1, wrapperError{nil}},
		{reflectError{nil}, 1, reflectError{nil}},
		{causerError{origin}, 2, origin},
		{wrapperError{origin}, 2, origin},
		{reflectError{origin}, 2, origin},
		{causerError{reflectError{origin}}, 3, origin},
		{wrapperError{causerError{origin}}, 3, origin},
		{reflectError{wrapperError{origin}}, 3, origin},
		{causerError{reflectError{causerError{origin}}}, 4, origin},
		{wrapperError{causerError{wrapperError{origin}}}, 4, origin},
		{reflectError{wrapperError{reflectError{origin}}}, 4, origin},

		{stopError{nil}, 1, stopError{nil}},
		{stopError{causerError{nil}}, 1, stopError{causerError{nil}}},
		{stopError{wrapperError{nil}}, 1, stopError{wrapperError{nil}}},
		{stopError{reflectError{nil}}, 1, stopError{reflectError{nil}}},
		{causerError{stopError{origin}}, 2, stopError{origin}},
		{wrapperError{stopError{origin}}, 2, stopError{origin}},
		{reflectError{stopError{origin}}, 2, stopError{origin}},
		{causerError{reflectError{stopError{nil}}}, 3, stopError{nil}},
		{wrapperError{causerError{stopError{nil}}}, 3, stopError{nil}},
		{reflectError{wrapperError{stopError{nil}}}, 3, stopError{nil}},
	} {
		var last error
		calls := 0
		errors.Walk(test.err, func(err error) bool {
			calls++
			last = err
			_, stop := err.(stopError)
			return stop
		})
		assert.Equal(t, test.calls, calls)
		assert.Equal(t, test.last, last)
	}
}

type causerError struct {
	err error
}
type wrapperError struct {
	err error
}
type reflectError struct {
	Err error
}
type stopError struct {
	err error
}

func (e causerError) Error() string {
	return fmt.Sprintf("causerError(%s)", e.err)
}
func (e causerError) Cause() error {
	return e.err
}
func (e wrapperError) Unwrap() error {
	return e.err
}
func (e wrapperError) Error() string {
	return fmt.Sprintf("wrapperError(%s)", e.err)
}
func (e reflectError) Error() string {
	return fmt.Sprintf("reflectError(%s)", e.Err)
}
func (e stopError) Error() string {
	return fmt.Sprintf("stopError(%s)", e.err)
}
func (e stopError) Cause() error {
	return e.err
}
@@ -2,69 +2,74 @@
package pacer

import (
	"context"
	"math/rand"
	"sync"
	"time"

	"github.com/ncw/rclone/lib/errors"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/fserrors"
	"golang.org/x/time/rate"
)

// State represents the public Pacer state that will be passed to the
// configured Calculator
type State struct {
	SleepTime          time.Duration // current time to sleep before adding the pacer token back
	ConsecutiveRetries int           // number of consecutive retries, will be 0 when the last invoker call returned false
	LastError          error         // the error returned by the last invoker call or nil
}

// Calculator is a generic calculation function for a Pacer.
type Calculator interface {
	// Calculate takes the current Pacer state and returns the sleep time after which
	// the next Pacer call will be done.
	Calculate(state State) time.Duration
}

// Pacer is the primary type of the pacer package. It allows calls to be
// retried with a configurable delay in between.
// Pacer state
type Pacer struct {
	pacerOptions
	mu         sync.Mutex    // Protecting read/writes
	pacer      chan struct{} // To pace the operations
	connTokens chan struct{} // Connection tokens
	state      State
}
type pacerOptions struct {
	maxConnections int         // Maximum number of concurrent connections
	retries        int         // Max number of retries
	calculator     Calculator  // switchable pacing algorithm - call with mu held
	invoker        InvokerFunc // wrapper function used to invoke the target function
	mu                 sync.Mutex    // Protecting read/writes
	minSleep           time.Duration // minimum sleep time
	maxSleep           time.Duration // maximum sleep time
	burst              int           // number of calls to send without rate limiting
	limiter            *rate.Limiter // rate limiter for the minsleep
	decayConstant      uint          // decay constant
	attackConstant     uint          // attack constant
	pacer              chan struct{} // To pace the operations
	sleepTime          time.Duration // Time to sleep for each transaction
	retries            int           // Max number of retries
	maxConnections     int           // Maximum number of concurrent connections
	connTokens         chan struct{} // Connection tokens
	calculatePace      func(bool)    // switchable pacing algorithm - call with mu held
	consecutiveRetries int           // number of consecutive retries
}

// InvokerFunc is the signature of the wrapper function used to invoke the
// target function in Pacer.
type InvokerFunc func(try, tries int, f Paced) (bool, error)
// Type is for selecting different pacing algorithms
type Type int

// Option can be used in New to configure the Pacer.
type Option func(*pacerOptions)
const (
	// DefaultPacer is a truncated exponential attack and decay.
	//
	// On retries the sleep time is doubled, on non errors then
	// sleeptime decays according to the decay constant as set
	// with SetDecayConstant.
	//
	// The sleep never goes below that set with SetMinSleep or
	// above that set with SetMaxSleep.
	DefaultPacer = Type(iota)

// CalculatorOption sets a Calculator for the new Pacer.
func CalculatorOption(c Calculator) Option {
	return func(p *pacerOptions) { p.calculator = c }
}
	// AmazonCloudDrivePacer is a specialised pacer for Amazon Drive
	//
	// It implements a truncated exponential backoff strategy with
	// randomization. Normally operations are paced at the
	// interval set with SetMinSleep. On errors the sleep timer
	// is set to 0..2**retries seconds.
	//
	// See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
	AmazonCloudDrivePacer

// RetriesOption sets the number of retries for the new Pacer.
func RetriesOption(retries int) Option {
	return func(p *pacerOptions) { p.retries = retries }
}
	// GoogleDrivePacer is a specialised pacer for Google Drive
	//
	// It implements a truncated exponential backoff strategy with
	// randomization. Normally operations are paced at the
	// interval set with SetMinSleep. On errors the sleep timer
	// is set to (2 ^ n) + random_number_milliseconds seconds
	//
	// See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
	GoogleDrivePacer

// MaxConnectionsOption sets the maximum number of connections for the new Pacer.
func MaxConnectionsOption(maxConnections int) Option {
	return func(p *pacerOptions) { p.maxConnections = maxConnections }
}

// InvokerOption sets an InvokerFunc for the new Pacer.
func InvokerOption(invoker InvokerFunc) Option {
	return func(p *pacerOptions) { p.invoker = invoker }
}
	// S3Pacer is a specialised pacer for S3
	//
	// It is basically the defaultPacer, but allows the sleep time to go to 0
	// when things are going well.
	S3Pacer
)

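Calculator is a plain interface, so callers can plug in their own pacing policy alongside the built-in ones. A minimal sketch of a custom implementation against the State and Calculator definitions above, written as an external package would see it; the fixed-delay policy itself is invented for illustration:

	// fixedCalculator sleeps a constant base time, doubling per
	// consecutive retry but never exceeding max.
	type fixedCalculator struct {
		base, max time.Duration
	}

	func (c fixedCalculator) Calculate(state pacer.State) time.Duration {
		sleep := c.base << uint(state.ConsecutiveRetries)
		if sleep > c.max {
			sleep = c.max
		}
		return sleep
	}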
// Paced is a function which is called by the Call and CallNoRetry
// methods. It should return a boolean, true if it would like to be
@@ -72,27 +77,19 @@ func InvokerOption(invoker InvokerFunc) Option {
// wrapped in a RetryError.
type Paced func() (bool, error)

// New returns a Pacer with sensible defaults.
func New(options ...Option) *Pacer {
	opts := pacerOptions{
		maxConnections: 10,
		retries:        3,
	}
	for _, o := range options {
		o(&opts)
	}
// New returns a Pacer with sensible defaults
func New() *Pacer {
	p := &Pacer{
		pacerOptions: opts,
		pacer:        make(chan struct{}, 1),
		maxSleep:       2 * time.Second,
		decayConstant:  2,
		attackConstant: 1,
		retries:        fs.Config.LowLevelRetries,
		pacer:          make(chan struct{}, 1),
	}
	if p.calculator == nil {
		p.SetCalculator(nil)
	}
	p.state.SleepTime = p.calculator.Calculate(p.state)
	if p.invoker == nil {
		p.invoker = invoke
	}
	p.SetMaxConnections(p.maxConnections)
	p.sleepTime = p.minSleep
	p.SetPacer(DefaultPacer)
	p.SetMaxConnections(fs.Config.Checkers + fs.Config.Transfers)
	p.SetMinSleep(10 * time.Millisecond)

	// Put the first pacing token in
	p.pacer <- struct{}{}
@@ -100,11 +97,54 @@ func New(options ...Option) *Pacer {
	return p
}

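A sketch of constructing the options-based variant shown on one side of this hunk, using the functional options defined earlier (NewDefault and its MinSleep option appear in the tests further down):

	p := pacer.New(
		pacer.RetriesOption(5),
		pacer.MaxConnectionsOption(4),
		pacer.CalculatorOption(pacer.NewDefault(pacer.MinSleep(20*time.Millisecond))),
	)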
// SetSleep sets the current sleep time
func (p *Pacer) SetSleep(t time.Duration) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.sleepTime = t
	return p
}

// GetSleep gets the current sleep time
func (p *Pacer) GetSleep() time.Duration {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.sleepTime
}

// SetMinSleep sets the minimum sleep time for the pacer
func (p *Pacer) SetMinSleep(t time.Duration) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.minSleep = t
	p.sleepTime = p.minSleep
	p.limiter = rate.NewLimiter(rate.Every(p.minSleep), p.burst)
	return p
}

// SetBurst sets the burst with no limiting of the pacer
func (p *Pacer) SetBurst(n int) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.burst = n
	p.limiter = rate.NewLimiter(rate.Every(p.minSleep), p.burst)
	return p
}

// SetMaxSleep sets the maximum sleep time for the pacer
func (p *Pacer) SetMaxSleep(t time.Duration) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.maxSleep = t
	p.sleepTime = p.minSleep
	return p
}

// SetMaxConnections sets the maximum number of concurrent connections.
// Setting the value to 0 will allow unlimited number of connections.
// Should not be changed once you have started calling the pacer.
// By default this will be set to fs.Config.Checkers.
func (p *Pacer) SetMaxConnections(n int) {
func (p *Pacer) SetMaxConnections(n int) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.maxConnections = n
@@ -116,34 +156,61 @@ func (p *Pacer) SetMaxConnections(n int) {
		p.connTokens <- struct{}{}
	}
}
	return p
}

// SetRetries sets the max number of retries for Call
func (p *Pacer) SetRetries(retries int) {
// SetDecayConstant sets the decay constant for the pacer
//
// This is the speed the time falls back to the minimum after errors
// have occurred.
//
// bigger for slower decay, exponential. 1 is halve, 0 is go straight to minimum
func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.decayConstant = decay
	return p
}

// SetAttackConstant sets the attack constant for the pacer
//
// This is the speed the time grows from the minimum after errors have
// occurred.
//
// bigger for slower attack, 1 is double, 0 is go straight to maximum
func (p *Pacer) SetAttackConstant(attack uint) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.attackConstant = attack
	return p
}

// SetRetries sets the max number of tries for Call
func (p *Pacer) SetRetries(retries int) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.retries = retries
	return p
}

// SetCalculator sets the pacing algorithm. Don't modify the Calculator object
// afterwards, use the ModifyCalculator method when needed.
// SetPacer sets the pacing algorithm
//
// It will choose the default algorithm if nil is passed in.
func (p *Pacer) SetCalculator(c Calculator) {
// It will choose the default algorithm if an incorrect value is
// passed in.
func (p *Pacer) SetPacer(t Type) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c == nil {
		c = NewDefault()
	switch t {
	case AmazonCloudDrivePacer:
		p.calculatePace = p.acdPacer
	case GoogleDrivePacer:
		p.calculatePace = p.drivePacer
	case S3Pacer:
		p.calculatePace = p.s3Pacer
	default:
		p.calculatePace = p.defaultPacer
	}
	p.calculator = c
}

// ModifyCalculator calls the given function with the currently configured
// Calculator and the Pacer lock held.
func (p *Pacer) ModifyCalculator(f func(Calculator)) {
	p.mu.Lock()
	f(p.calculator)
	p.mu.Unlock()
	return p
}

// Start a call to the API
@@ -163,29 +230,170 @@ func (p *Pacer) beginCall() {

	p.mu.Lock()
	// Restart the timer
	go func(t time.Duration) {
		time.Sleep(t)
	go func(sleepTime, minSleep time.Duration) {
		// fs.Debugf(f, "New sleep for %v at %v", t, time.Now())
		// Sleep the minimum time with the rate limiter
		if minSleep > 0 && sleepTime >= minSleep {
			_ = p.limiter.Wait(context.Background())
			sleepTime -= minSleep
		}
		// Then sleep the remaining time
		if sleepTime > 0 {
			time.Sleep(sleepTime)
		}
		p.pacer <- struct{}{}
	}(p.state.SleepTime)
	}(p.sleepTime, p.minSleep)
	p.mu.Unlock()
}

// defaultPacer implements a truncated exponential up and down
// pacing algorithm
//
// See the description for DefaultPacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) defaultPacer(retry bool) {
	oldSleepTime := p.sleepTime
	if retry {
		if p.attackConstant == 0 {
			p.sleepTime = p.maxSleep
		} else {
			p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
		}
		if p.sleepTime > p.maxSleep {
			p.sleepTime = p.maxSleep
		}
		if p.sleepTime != oldSleepTime {
			fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
		}
	} else {
		p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
		if p.sleepTime < p.minSleep {
			p.sleepTime = p.minSleep
		}
		if p.sleepTime != oldSleepTime {
			fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
		}
	}
}

// acdPacer implements a truncated exponential backoff
// strategy with randomization for Amazon Drive
//
// See the description for AmazonCloudDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) acdPacer(retry bool) {
	consecutiveRetries := p.consecutiveRetries
	if consecutiveRetries == 0 {
		if p.sleepTime != p.minSleep {
			p.sleepTime = p.minSleep
			fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
		}
	} else {
		if consecutiveRetries > 9 {
			consecutiveRetries = 9
		}
		// consecutiveRetries starts at 1 so
		// maxSleep is 2**(consecutiveRetries-1) seconds
		maxSleep := time.Second << uint(consecutiveRetries-1)
		// actual sleep is random from 0..maxSleep
		p.sleepTime = time.Duration(rand.Int63n(int64(maxSleep)))
		if p.sleepTime < p.minSleep {
			p.sleepTime = p.minSleep
		}
		fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
	}
}

// drivePacer implements a truncated exponential backoff strategy with
// randomization for Google Drive
//
// See the description for GoogleDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) drivePacer(retry bool) {
	consecutiveRetries := p.consecutiveRetries
	if consecutiveRetries == 0 {
		if p.sleepTime != p.minSleep {
			p.sleepTime = p.minSleep
			fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
		}
	} else {
		if consecutiveRetries > 5 {
			consecutiveRetries = 5
		}
		// consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
		// maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
		p.sleepTime = time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Second)))
		fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
	}
}

// s3Pacer implements a pacer compatible with our expectations of S3, where it tries to not
// delay at all between successful calls, but backs off in the default fashion in response
// to any errors.
// The assumption is that errors should be exceedingly rare (S3 seems to have largely solved
// the sort of scalability questions rclone is likely to run into), and in the happy case
// it can handle calls with no delays between them.
//
// Basically defaultPacer, but with some handling of sleepTime going to/from 0ms
// Ignores minSleep entirely
//
// Call with p.mu held
func (p *Pacer) s3Pacer(retry bool) {
	oldSleepTime := p.sleepTime
	if retry {
		if p.attackConstant == 0 {
			p.sleepTime = p.maxSleep
		} else {
			if p.sleepTime == 0 {
				p.sleepTime = p.minSleep
			} else {
				p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
			}
		}
		if p.sleepTime > p.maxSleep {
			p.sleepTime = p.maxSleep
		}
		if p.sleepTime != oldSleepTime {
			fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
		}
	} else {
		p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
		if p.sleepTime < p.minSleep {
			p.sleepTime = 0
		}
		if p.sleepTime != oldSleepTime {
			fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
		}
	}
}

// endCall implements the pacing algorithm
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
func (p *Pacer) endCall(retry bool, err error) {
func (p *Pacer) endCall(retry bool) {
	if p.maxConnections > 0 {
		p.connTokens <- struct{}{}
	}
	p.mu.Lock()
	if retry {
		p.state.ConsecutiveRetries++
		p.consecutiveRetries++
	} else {
		p.state.ConsecutiveRetries = 0
		p.consecutiveRetries = 0
	}
	p.state.LastError = err
	p.state.SleepTime = p.calculator.Calculate(p.state)
	p.calculatePace(retry)
	p.mu.Unlock()
}

@@ -194,11 +402,15 @@ func (p *Pacer) call(fn Paced, retries int) (err error) {
	var retry bool
	for i := 1; i <= retries; i++ {
		p.beginCall()
		retry, err = p.invoker(i, retries, fn)
		p.endCall(retry, err)
		retry, err = fn()
		p.endCall(retry)
		if !retry {
			break
		}
		fs.Debugf("pacer", "low level retry %d/%d (error %v)", i, retries, err)
	}
	if retry {
		err = fserrors.RetryError(err)
	}
	return err
}
@@ -224,41 +436,3 @@ func (p *Pacer) Call(fn Paced) (err error) {
func (p *Pacer) CallNoRetry(fn Paced) error {
	return p.call(fn, 1)
}

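A usage sketch of Call with a Paced function, assuming a pacer p built as in the earlier sketch; the HTTP request is hypothetical and only stands in for a remote API call:

	err := p.Call(func() (bool, error) {
		resp, err := http.Get("https://example.com/api") // hypothetical remote call
		if err != nil {
			return true, err // ask the pacer to back off and retry
		}
		defer func() { _ = resp.Body.Close() }()
		if resp.StatusCode == http.StatusTooManyRequests {
			return true, errors.New("rate limited") // retry with increased sleep
		}
		return false, nil // success - no retry, sleep time decays
	})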
func invoke(try, tries int, f Paced) (bool, error) {
	return f()
}

type retryAfterError struct {
	error
	retryAfter time.Duration
}

func (r *retryAfterError) Error() string {
	return r.error.Error()
}

func (r *retryAfterError) Cause() error {
	return r.error
}

// RetryAfterError returns a wrapped error that can be used by Calculator implementations
func RetryAfterError(err error, retryAfter time.Duration) error {
	return &retryAfterError{
		error:      err,
		retryAfter: retryAfter,
	}
}

// IsRetryAfter returns true if the error or any of its causes is an error
// returned by RetryAfterError. It also returns the associated Duration if possible.
func IsRetryAfter(err error) (retryAfter time.Duration, isRetryAfter bool) {
	errors.Walk(err, func(err error) bool {
		if r, ok := err.(*retryAfterError); ok {
			retryAfter, isRetryAfter = r.retryAfter, true
			return true
		}
		return false
	})
	return
}

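A sketch of how a Calculator could honour RetryAfterError via IsRetryAfter; the calculator type is illustrative, not part of the package, and this is just one plausible policy:

	// retryAwareCalculator is an illustrative Calculator that obeys a
	// server-provided retry-after delay when one is attached to the error.
	type retryAwareCalculator struct {
		base time.Duration
	}

	func (c retryAwareCalculator) Calculate(state pacer.State) time.Duration {
		if retryAfter, ok := pacer.IsRetryAfter(state.LastError); ok {
			return retryAfter
		}
		return c.base
	}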
@@ -1,85 +1,181 @@
package pacer

import (
	"sync"
	"fmt"
	"testing"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
)

func TestNew(t *testing.T) {
	const expectedRetries = 7
	const expectedConnections = 9
	p := New(RetriesOption(expectedRetries), MaxConnectionsOption(expectedConnections))
	if d, ok := p.calculator.(*Default); ok {
		assert.Equal(t, 10*time.Millisecond, d.minSleep)
		assert.Equal(t, 2*time.Second, d.maxSleep)
		assert.Equal(t, d.minSleep, p.state.SleepTime)
		assert.Equal(t, uint(2), d.decayConstant)
		assert.Equal(t, uint(1), d.attackConstant)
	} else {
		t.Errorf("calculator")
	fs.Config.LowLevelRetries = expectedRetries
	p := New()
	if p.minSleep != 10*time.Millisecond {
		t.Errorf("minSleep")
	}
	if p.maxSleep != 2*time.Second {
		t.Errorf("maxSleep")
	}
	if p.sleepTime != p.minSleep {
		t.Errorf("sleepTime")
	}
	if p.retries != expectedRetries {
		t.Errorf("retries want %v got %v", expectedRetries, p.retries)
	}
	if p.decayConstant != 2 {
		t.Errorf("decayConstant")
	}
	if p.attackConstant != 1 {
		t.Errorf("attackConstant")
	}
	if cap(p.pacer) != 1 {
		t.Errorf("pacer 1")
	}
	if len(p.pacer) != 1 {
		t.Errorf("pacer 2")
	}
	if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.defaultPacer) {
		t.Errorf("calculatePace")
	}
	if p.maxConnections != fs.Config.Checkers+fs.Config.Transfers {
		t.Errorf("maxConnections")
	}
	if cap(p.connTokens) != fs.Config.Checkers+fs.Config.Transfers {
		t.Errorf("connTokens")
	}
	if p.consecutiveRetries != 0 {
		t.Errorf("consecutiveRetries")
	}
}

func TestSetSleep(t *testing.T) {
	p := New().SetSleep(2 * time.Millisecond)
	if p.sleepTime != 2*time.Millisecond {
		t.Errorf("didn't set")
	}
}

func TestGetSleep(t *testing.T) {
	p := New().SetSleep(2 * time.Millisecond)
	if p.GetSleep() != 2*time.Millisecond {
		t.Errorf("didn't get")
	}
}

func TestSetMinSleep(t *testing.T) {
	p := New().SetMinSleep(1 * time.Millisecond)
	if p.minSleep != 1*time.Millisecond {
		t.Errorf("didn't set")
	}
}

func TestSetMaxSleep(t *testing.T) {
	p := New().SetMaxSleep(100 * time.Second)
	if p.maxSleep != 100*time.Second {
		t.Errorf("didn't set")
	}
	assert.Equal(t, expectedRetries, p.retries)
	assert.Equal(t, 1, cap(p.pacer))
	assert.Equal(t, 1, len(p.pacer))
	assert.Equal(t, expectedConnections, p.maxConnections)
	assert.Equal(t, expectedConnections, cap(p.connTokens))
	assert.Equal(t, 0, p.state.ConsecutiveRetries)
}

func TestMaxConnections(t *testing.T) {
	p := New()
	p.SetMaxConnections(20)
	assert.Equal(t, 20, p.maxConnections)
	assert.Equal(t, 20, cap(p.connTokens))
	p := New().SetMaxConnections(20)
	if p.maxConnections != 20 {
		t.Errorf("maxConnections")
	}
	if cap(p.connTokens) != 20 {
		t.Errorf("connTokens")
	}
	p.SetMaxConnections(0)
	assert.Equal(t, 0, p.maxConnections)
	assert.Nil(t, p.connTokens)
	if p.maxConnections != 0 {
		t.Errorf("maxConnections is not 0")
	}
	if p.connTokens != nil {
		t.Errorf("connTokens is not nil")
	}
}

func TestSetDecayConstant(t *testing.T) {
	p := New().SetDecayConstant(17)
	if p.decayConstant != 17 {
		t.Errorf("didn't set")
	}
}

func TestDecay(t *testing.T) {
	c := NewDefault(MinSleep(1*time.Microsecond), MaxSleep(1*time.Second))
	p := New().SetMinSleep(time.Microsecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second)
	for _, test := range []struct {
		in             State
		in             time.Duration
		attackConstant uint
		want           time.Duration
	}{
		{State{SleepTime: 8 * time.Millisecond}, 1, 4 * time.Millisecond},
		{State{SleepTime: 1 * time.Millisecond}, 0, 1 * time.Microsecond},
		{State{SleepTime: 1 * time.Millisecond}, 2, (3 * time.Millisecond) / 4},
		{State{SleepTime: 1 * time.Millisecond}, 3, (7 * time.Millisecond) / 8},
		{8 * time.Millisecond, 1, 4 * time.Millisecond},
		{1 * time.Millisecond, 0, time.Microsecond},
		{1 * time.Millisecond, 2, (3 * time.Millisecond) / 4},
		{1 * time.Millisecond, 3, (7 * time.Millisecond) / 8},
	} {
		c.decayConstant = test.attackConstant
		got := c.Calculate(test.in)
		assert.Equal(t, test.want, got, "test: %+v", test)
		p.sleepTime = test.in
		p.SetDecayConstant(test.attackConstant)
		p.defaultPacer(false)
		got := p.sleepTime
		if got != test.want {
			t.Errorf("bad sleep want %v got %v", test.want, got)
		}
	}
}

func TestSetAttackConstant(t *testing.T) {
	p := New().SetAttackConstant(19)
	if p.attackConstant != 19 {
		t.Errorf("didn't set")
	}
}

func TestAttack(t *testing.T) {
	c := NewDefault(MinSleep(1*time.Microsecond), MaxSleep(1*time.Second))
	p := New().SetMinSleep(time.Microsecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second)
	for _, test := range []struct {
		in             State
		in             time.Duration
		attackConstant uint
		want           time.Duration
	}{
		{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 1, 2 * time.Millisecond},
		{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 0, 1 * time.Second},
		{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 2, (4 * time.Millisecond) / 3},
		{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 3, (8 * time.Millisecond) / 7},
		{1 * time.Millisecond, 1, 2 * time.Millisecond},
		{1 * time.Millisecond, 0, time.Second},
		{1 * time.Millisecond, 2, (4 * time.Millisecond) / 3},
		{1 * time.Millisecond, 3, (8 * time.Millisecond) / 7},
	} {
		c.attackConstant = test.attackConstant
		got := c.Calculate(test.in)
		assert.Equal(t, test.want, got, "test: %+v", test)
		p.sleepTime = test.in
		p.SetAttackConstant(test.attackConstant)
		p.defaultPacer(true)
		got := p.sleepTime
		if got != test.want {
			t.Errorf("bad sleep want %v got %v", test.want, got)
		}
	}

}

func TestSetRetries(t *testing.T) {
	p := New()
	p.SetRetries(18)
	assert.Equal(t, 18, p.retries)
	p := New().SetRetries(18)
	if p.retries != 18 {
		t.Errorf("didn't set")
	}
}

func TestSetPacer(t *testing.T) {
	p := New().SetPacer(AmazonCloudDrivePacer)
	if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.acdPacer) {
		t.Errorf("calculatePace is not acdPacer")
	}
	p.SetPacer(GoogleDrivePacer)
	if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.drivePacer) {
		t.Errorf("calculatePace is not drivePacer")
	}
	p.SetPacer(DefaultPacer)
	if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.defaultPacer) {
		t.Errorf("calculatePace is not defaultPacer")
	}
}

// emptyTokens empties the pacer of all its tokens
@@ -104,7 +200,7 @@ func waitForPace(p *Pacer, duration time.Duration) (when time.Time) {
|
||||
}
|
||||
|
||||
func TestBeginCall(t *testing.T) {
|
||||
p := New(MaxConnectionsOption(10), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond))))
|
||||
p := New().SetMaxConnections(10).SetMinSleep(1 * time.Millisecond)
|
||||
emptyTokens(p)
|
||||
go p.beginCall()
|
||||
if !waitForPace(p, 10*time.Millisecond).IsZero() {
|
||||
@@ -127,7 +223,7 @@ func TestBeginCall(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBeginCallZeroConnections(t *testing.T) {
|
||||
p := New(MaxConnectionsOption(0), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond))))
|
||||
p := New().SetMaxConnections(0).SetMinSleep(1 * time.Millisecond)
|
||||
emptyTokens(p)
|
||||
go p.beginCall()
|
||||
if !waitForPace(p, 10*time.Millisecond).IsZero() {
|
||||
@@ -145,143 +241,155 @@ func TestBeginCallZeroConnections(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDefaultPacer(t *testing.T) {
|
||||
c := NewDefault(MinSleep(1*time.Millisecond), MaxSleep(1*time.Second), DecayConstant(2))
|
||||
p := New().SetMinSleep(time.Millisecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second).SetDecayConstant(2)
|
||||
for _, test := range []struct {
|
||||
state State
|
||||
in time.Duration
|
||||
retry bool
|
||||
want time.Duration
|
||||
}{
|
||||
{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 2 * time.Millisecond},
|
||||
{State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 1 * time.Second},
|
||||
{State{SleepTime: (3 * time.Second) / 4, ConsecutiveRetries: 1}, 1 * time.Second},
|
||||
{State{SleepTime: 1 * time.Second}, 750 * time.Millisecond},
|
||||
{State{SleepTime: 1000 * time.Microsecond}, 1 * time.Millisecond},
|
||||
{State{SleepTime: 1200 * time.Microsecond}, 1 * time.Millisecond},
|
||||
{time.Millisecond, true, 2 * time.Millisecond},
|
||||
{time.Second, true, time.Second},
|
||||
{(3 * time.Second) / 4, true, time.Second},
|
||||
{time.Second, false, 750 * time.Millisecond},
|
||||
{1000 * time.Microsecond, false, time.Millisecond},
|
||||
{1200 * time.Microsecond, false, time.Millisecond},
|
||||
} {
|
||||
got := c.Calculate(test.state)
|
||||
assert.Equal(t, test.want, got, "test: %+v", test)
|
||||
p.sleepTime = test.in
|
||||
p.defaultPacer(test.retry)
|
||||
got := p.sleepTime
|
||||
if got != test.want {
|
||||
t.Errorf("bad sleep want %v got %v", test.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
 func TestAmazonCloudDrivePacer(t *testing.T) {
-    c := NewAmazonCloudDrive(MinSleep(1 * time.Millisecond))
+    p := New().SetMinSleep(time.Millisecond).SetPacer(AmazonCloudDrivePacer).SetMaxSleep(time.Second).SetDecayConstant(2)
     // Do lots of times because of the random number!
     for _, test := range []struct {
-        state State
-        want  time.Duration
+        in                 time.Duration
+        consecutiveRetries int
+        retry              bool
+        want               time.Duration
     }{
-        {State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 0}, 1 * time.Millisecond},
-        {State{SleepTime: 10 * time.Millisecond, ConsecutiveRetries: 0}, 1 * time.Millisecond},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 500 * time.Millisecond},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 2}, 1 * time.Second},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 3}, 2 * time.Second},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 4}, 4 * time.Second},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 5}, 8 * time.Second},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 6}, 16 * time.Second},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 7}, 32 * time.Second},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 8}, 64 * time.Second},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 9}, 128 * time.Second},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 10}, 128 * time.Second},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 11}, 128 * time.Second},
+        {time.Millisecond, 0, true, time.Millisecond},
+        {10 * time.Millisecond, 0, true, time.Millisecond},
+        {1 * time.Second, 1, true, 500 * time.Millisecond},
+        {1 * time.Second, 2, true, 1 * time.Second},
+        {1 * time.Second, 3, true, 2 * time.Second},
+        {1 * time.Second, 4, true, 4 * time.Second},
+        {1 * time.Second, 5, true, 8 * time.Second},
+        {1 * time.Second, 6, true, 16 * time.Second},
+        {1 * time.Second, 7, true, 32 * time.Second},
+        {1 * time.Second, 8, true, 64 * time.Second},
+        {1 * time.Second, 9, true, 128 * time.Second},
+        {1 * time.Second, 10, true, 128 * time.Second},
+        {1 * time.Second, 11, true, 128 * time.Second},
     } {
         const n = 1000
         var sum time.Duration
         // measure average time over n cycles
         for i := 0; i < n; i++ {
-            sum += c.Calculate(test.state)
+            p.sleepTime = test.in
+            p.consecutiveRetries = test.consecutiveRetries
+            p.acdPacer(test.retry)
+            sum += p.sleepTime
         }
         got := sum / n
-        assert.False(t, got < (test.want*9)/10 || got > (test.want*11)/10, "test: %+v", test)
+        //t.Logf("%+v: got = %v", test, got)
+        if got < (test.want*9)/10 || got > (test.want*11)/10 {
+            t.Fatalf("%+v: bad sleep want %v+/-10%% got %v", test, test.want, got)
+        }
     }
 }
 func TestGoogleDrivePacer(t *testing.T) {
+    p := New().SetMinSleep(time.Millisecond).SetPacer(GoogleDrivePacer).SetMaxSleep(time.Second).SetDecayConstant(2)
     // Do lots of times because of the random number!
     for _, test := range []struct {
-        state State
-        want  time.Duration
+        in                 time.Duration
+        consecutiveRetries int
+        retry              bool
+        want               time.Duration
     }{
-        {State{SleepTime: 1 * time.Millisecond}, 0},
-        {State{SleepTime: 10 * time.Millisecond}, 0},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 1*time.Second + 500*time.Millisecond},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 2}, 2*time.Second + 500*time.Millisecond},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 3}, 4*time.Second + 500*time.Millisecond},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 4}, 8*time.Second + 500*time.Millisecond},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 5}, 16*time.Second + 500*time.Millisecond},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 6}, 16*time.Second + 500*time.Millisecond},
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 7}, 16*time.Second + 500*time.Millisecond},
+        {time.Millisecond, 0, true, time.Millisecond},
+        {10 * time.Millisecond, 0, true, time.Millisecond},
+        {1 * time.Second, 1, true, 1*time.Second + 500*time.Millisecond},
+        {1 * time.Second, 2, true, 2*time.Second + 500*time.Millisecond},
+        {1 * time.Second, 3, true, 4*time.Second + 500*time.Millisecond},
+        {1 * time.Second, 4, true, 8*time.Second + 500*time.Millisecond},
+        {1 * time.Second, 5, true, 16*time.Second + 500*time.Millisecond},
+        {1 * time.Second, 6, true, 16*time.Second + 500*time.Millisecond},
+        {1 * time.Second, 7, true, 16*time.Second + 500*time.Millisecond},
     } {
         const n = 1000
         var sum time.Duration
         // measure average time over n cycles
         for i := 0; i < n; i++ {
-            c := NewGoogleDrive(MinSleep(1 * time.Millisecond))
-            sum += c.Calculate(test.state)
+            p.sleepTime = test.in
+            p.consecutiveRetries = test.consecutiveRetries
+            p.drivePacer(test.retry)
+            sum += p.sleepTime
         }
         got := sum / n
-        assert.False(t, got < (test.want*9)/10 || got > (test.want*11)/10, "test: %+v, got: %v", test, got)
+        //t.Logf("%+v: got = %v", test, got)
+        if got < (test.want*9)/10 || got > (test.want*11)/10 {
+            t.Fatalf("%+v: bad sleep want %v+/-10%% got %v", test, test.want, got)
+        }
     }
-
-    const minSleep = 2 * time.Millisecond
-    for _, test := range []struct {
-        calls int
-        want  int
-    }{
-        {1, 0},
-        {9, 0},
-        {10, 0},
-        {11, 1},
-        {12, 2},
-    } {
-        c := NewGoogleDrive(MinSleep(minSleep), Burst(10))
-        count := 0
-        for i := 0; i < test.calls; i++ {
-            sleep := c.Calculate(State{})
-            if sleep != 0 {
-                count++
-            }
-        }
-        assert.Equalf(t, test.want, count, "test: %+v, got: %v", test, count)
-    }
 }
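The removed burst test above leans on golang.org/x/time/rate: with a limiter refilled every minSleep and a burst of 10, the first 10 reservations cost nothing and only the 11th onwards are delayed. Here is a minimal standalone sketch of that behaviour (not part of the diff; it assumes the loop runs much faster than the 2ms refill interval):

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// One token every 2ms, up to 10 tokens banked at once.
	limiter := rate.NewLimiter(rate.Every(2*time.Millisecond), 10)
	delayed := 0
	for i := 0; i < 12; i++ {
		// Reserve always succeeds; Delay reports how long we would have to wait.
		if limiter.Reserve().Delay() != 0 {
			delayed++
		}
	}
	fmt.Println(delayed) // 2: only calls 11 and 12 had to wait
}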
 func TestS3Pacer(t *testing.T) {
-    c := NewS3(MinSleep(10*time.Millisecond), MaxSleep(1*time.Second), DecayConstant(2))
+    p := New().SetMinSleep(10 * time.Millisecond).SetPacer(S3Pacer).SetMaxSleep(time.Second).SetDecayConstant(2)
     for _, test := range []struct {
-        state State
+        in    time.Duration
+        retry bool
         want  time.Duration
     }{
-        {State{SleepTime: 0, ConsecutiveRetries: 1}, 10 * time.Millisecond},               //Things were going ok, we failed once, back off to minSleep
-        {State{SleepTime: 10 * time.Millisecond, ConsecutiveRetries: 1}, 20 * time.Millisecond}, //Another fail, double the backoff
-        {State{SleepTime: 10 * time.Millisecond}, 0},                                      //Things start going ok when we're at minSleep; should result in no sleep
-        {State{SleepTime: 12 * time.Millisecond}, 0},                                      //*near* minsleep and going ok, decay would take below minSleep, should go to 0
-        {State{SleepTime: 0}, 0},                                                          //Things have been going ok; not retrying should keep sleep at 0
-        {State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 1 * time.Second},       //Check maxSleep is enforced
-        {State{SleepTime: (3 * time.Second) / 4, ConsecutiveRetries: 1}, 1 * time.Second}, //Check attack heading to maxSleep doesn't exceed maxSleep
-        {State{SleepTime: 1 * time.Second}, 750 * time.Millisecond},                       //Check decay from maxSleep
-        {State{SleepTime: 48 * time.Millisecond}, 36 * time.Millisecond},                  //Check simple decay above minSleep
+        {0, true, 10 * time.Millisecond},                     //Things were going ok, we failed once, back off to minSleep
+        {10 * time.Millisecond, true, 20 * time.Millisecond}, //Another fail, double the backoff
+        {10 * time.Millisecond, false, 0},                    //Things start going ok when we're at minSleep; should result in no sleep
+        {12 * time.Millisecond, false, 0},                    //*near* minsleep and going ok, decay would take below minSleep, should go to 0
+        {0, false, 0},                                        //Things have been going ok; not retrying should keep sleep at 0
+        {time.Second, true, time.Second},                     //Check maxSleep is enforced
+        {(3 * time.Second) / 4, true, time.Second},           //Check attack heading to maxSleep doesn't exceed maxSleep
+        {time.Second, false, 750 * time.Millisecond},         //Check decay from maxSleep
+        {48 * time.Millisecond, false, 36 * time.Millisecond}, //Check simple decay above minSleep
     } {
-        got := c.Calculate(test.state)
-        assert.Equal(t, test.want, got, "test: %+v", test)
+        p.sleepTime = test.in
+        p.s3Pacer(test.retry)
+        got := p.sleepTime
+        if got != test.want {
+            t.Errorf("bad sleep for %v with retry %v: want %v got %v", test.in, test.retry, test.want, got)
+        }
     }
 }
 func TestEndCall(t *testing.T) {
-    p := New(MaxConnectionsOption(5))
+    p := New().SetMaxConnections(5)
     emptyTokens(p)
-    p.state.ConsecutiveRetries = 1
-    p.endCall(true, nil)
-    assert.Equal(t, 1, len(p.connTokens))
-    assert.Equal(t, 2, p.state.ConsecutiveRetries)
+    p.consecutiveRetries = 1
+    p.endCall(true)
+    if len(p.connTokens) != 1 {
+        t.Errorf("Expecting 1 token")
+    }
+    if p.consecutiveRetries != 2 {
+        t.Errorf("Bad consecutive retries")
+    }
 }

 func TestEndCallZeroConnections(t *testing.T) {
-    p := New(MaxConnectionsOption(0))
+    p := New().SetMaxConnections(0)
     emptyTokens(p)
-    p.state.ConsecutiveRetries = 1
-    p.endCall(false, nil)
-    assert.Equal(t, 0, len(p.connTokens))
-    assert.Equal(t, 0, p.state.ConsecutiveRetries)
+    p.consecutiveRetries = 1
+    p.endCall(false)
+    if len(p.connTokens) != 0 {
+        t.Errorf("Expecting 0 token")
+    }
+    if p.consecutiveRetries != 0 {
+        t.Errorf("Bad consecutive retries")
+    }
 }

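The endCall tests above check two pieces of bookkeeping: a connection token goes back into the pool, and the consecutive-retry counter is bumped on retry. A rough sketch of that token-bucket idea using a buffered channel follows; this is illustrative only, not rclone's actual implementation, and miniPacer/newMiniPacer are made-up names:

package main

import "fmt"

type miniPacer struct {
	connTokens         chan struct{}
	consecutiveRetries int
}

func newMiniPacer(maxConnections int) *miniPacer {
	p := &miniPacer{}
	if maxConnections > 0 {
		// Pre-fill the bucket with one token per allowed connection.
		p.connTokens = make(chan struct{}, maxConnections)
		for i := 0; i < maxConnections; i++ {
			p.connTokens <- struct{}{}
		}
	}
	return p
}

func (p *miniPacer) beginCall() {
	if p.connTokens != nil {
		<-p.connTokens // blocks when all connections are in use
	}
}

func (p *miniPacer) endCall(retry bool) {
	if p.connTokens != nil {
		p.connTokens <- struct{}{} // hand the connection back
	}
	if retry {
		p.consecutiveRetries++
	} else {
		p.consecutiveRetries = 0
	}
}

func main() {
	p := newMiniPacer(5)
	p.beginCall()
	p.endCall(true)
	fmt.Println(len(p.connTokens), p.consecutiveRetries) // 5 1
}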
 var errFoo = errors.New("foo")

@@ -289,79 +397,67 @@ var errFoo = errors.New("foo")
 type dummyPaced struct {
     retry  bool
     called int
-    wait   *sync.Cond
 }

 func (dp *dummyPaced) fn() (bool, error) {
-    if dp.wait != nil {
-        dp.wait.L.Lock()
-        dp.called++
-        dp.wait.Wait()
-        dp.wait.L.Unlock()
-    } else {
-        dp.called++
-    }
+    dp.called++
     return dp.retry, errFoo
 }

-func TestCallFixed(t *testing.T) {
-    p := New(CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))
+func Test_callNoRetry(t *testing.T) {
+    p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond)

     dp := &dummyPaced{retry: false}
     err := p.call(dp.fn, 10)
-    assert.Equal(t, 1, dp.called)
-    assert.Equal(t, errFoo, err)
+    if dp.called != 1 {
+        t.Errorf("called want %d got %d", 1, dp.called)
+    }
+    if err != errFoo {
+        t.Errorf("err want %v got %v", errFoo, err)
+    }
 }

 func Test_callRetry(t *testing.T) {
-    p := New(CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))
+    p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond)

     dp := &dummyPaced{retry: true}
     err := p.call(dp.fn, 10)
-    assert.Equal(t, 10, dp.called)
-    assert.Equal(t, errFoo, err)
+    if dp.called != 10 {
+        t.Errorf("called want %d got %d", 10, dp.called)
+    }
+    if err == errFoo {
+        t.Errorf("err didn't want %v got %v", errFoo, err)
+    }
     _, ok := err.(fserrors.Retrier)
     if !ok {
         t.Errorf("didn't return a retry error")
     }
 }

 func TestCall(t *testing.T) {
-    p := New(RetriesOption(20), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))
+    p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond).SetRetries(20)

     dp := &dummyPaced{retry: true}
     err := p.Call(dp.fn)
-    assert.Equal(t, 20, dp.called)
-    assert.Equal(t, errFoo, err)
+    if dp.called != 20 {
+        t.Errorf("called want %d got %d", 20, dp.called)
+    }
     _, ok := err.(fserrors.Retrier)
     if !ok {
         t.Errorf("didn't return a retry error")
     }
 }

-func TestCallParallel(t *testing.T) {
-    p := New(MaxConnectionsOption(3), RetriesOption(1), CalculatorOption(NewDefault(MinSleep(100*time.Microsecond), MaxSleep(1*time.Millisecond))))
-
-    wait := sync.NewCond(&sync.Mutex{})
-    funcs := make([]*dummyPaced, 5)
-    for i := range funcs {
-        dp := &dummyPaced{wait: wait}
-        funcs[i] = dp
-        go func() {
-            assert.Equal(t, errFoo, p.CallNoRetry(dp.fn))
-        }()
-    }
-    time.Sleep(10 * time.Millisecond)
-    called := 0
-    wait.L.Lock()
-    for _, dp := range funcs {
-        called += dp.called
-    }
-    wait.L.Unlock()
-
-    assert.Equal(t, 3, called)
-    wait.Broadcast()
-    time.Sleep(20 * time.Millisecond)
-
-    called = 0
-    wait.L.Lock()
-    for _, dp := range funcs {
-        called += dp.called
-    }
-    wait.L.Unlock()
-
-    assert.Equal(t, 5, called)
-    wait.Broadcast()
+func TestCallNoRetry(t *testing.T) {
+    p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond).SetRetries(20)
+
+    dp := &dummyPaced{retry: true}
+    err := p.CallNoRetry(dp.fn)
+    if dp.called != 1 {
+        t.Errorf("called want %d got %d", 1, dp.called)
+    }
+    _, ok := err.(fserrors.Retrier)
+    if !ok {
+        t.Errorf("didn't return a retry error")
+    }
 }
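These tests drive the pacer's bounded retry loop: call the paced function up to the retry limit, stop early when it reports no retry, and mark the final error as retriable so callers can tell "gave up after N tries" from a hard failure. A condensed standalone sketch of that loop follows (not the real implementation; rclone marks the error via fserrors.Retrier, and retriableError here is an illustrative stand-in):

package main

import (
	"errors"
	"fmt"
)

// retriableError wraps an error and marks it as worth retrying.
type retriableError struct{ error }

// Retry reports that the wrapped error may succeed if tried again.
func (retriableError) Retry() bool { return true }

// call runs fn up to maxTries times, stopping early on success or on a
// non-retriable failure; a real pacer would also sleep between attempts.
func call(fn func() (bool, error), maxTries int) error {
	var (
		retry bool
		err   error
	)
	for try := 1; try <= maxTries; try++ {
		retry, err = fn()
		if !retry || err == nil {
			return err
		}
	}
	return retriableError{err}
}

func main() {
	errFoo := errors.New("foo")
	calls := 0
	err := call(func() (bool, error) { calls++; return true, errFoo }, 10)
	fmt.Println(calls, err) // 10 foo
	_, ok := err.(interface{ Retry() bool })
	fmt.Println(ok) // true: the final error is marked retriable
}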
@@ -1,326 +0,0 @@
package pacer

import (
    "math/rand"
    "time"

    "golang.org/x/time/rate"
)

type (
    // MinSleep configures the minimum sleep time of a Calculator
    MinSleep time.Duration
    // MaxSleep configures the maximum sleep time of a Calculator
    MaxSleep time.Duration
    // DecayConstant configures the decay constant time of a Calculator
    DecayConstant uint
    // AttackConstant configures the attack constant of a Calculator
    AttackConstant uint
    // Burst configures the number of API calls to allow without sleeping
    Burst int
)

// Default is a truncated exponential attack and decay.
//
// On retries the sleep time is doubled, on non errors then sleeptime decays
// according to the decay constant as set with SetDecayConstant.
//
// The sleep never goes below that set with SetMinSleep or above that set
// with SetMaxSleep.
type Default struct {
    minSleep       time.Duration // minimum sleep time
    maxSleep       time.Duration // maximum sleep time
    decayConstant  uint          // decay constant
    attackConstant uint          // attack constant
}

// DefaultOption is the interface implemented by all options for the Default Calculator
type DefaultOption interface {
    ApplyDefault(*Default)
}

// NewDefault creates a Calculator used by Pacer as the default.
func NewDefault(opts ...DefaultOption) *Default {
    c := &Default{
        minSleep:       10 * time.Millisecond,
        maxSleep:       2 * time.Second,
        decayConstant:  2,
        attackConstant: 1,
    }
    c.Update(opts...)
    return c
}

// Update applies the Calculator options.
func (c *Default) Update(opts ...DefaultOption) {
    for _, opt := range opts {
        opt.ApplyDefault(c)
    }
}

// ApplyDefault updates the value on the Calculator
func (o MinSleep) ApplyDefault(c *Default) {
    c.minSleep = time.Duration(o)
}

// ApplyDefault updates the value on the Calculator
func (o MaxSleep) ApplyDefault(c *Default) {
    c.maxSleep = time.Duration(o)
}

// ApplyDefault updates the value on the Calculator
func (o DecayConstant) ApplyDefault(c *Default) {
    c.decayConstant = uint(o)
}

// ApplyDefault updates the value on the Calculator
func (o AttackConstant) ApplyDefault(c *Default) {
    c.attackConstant = uint(o)
}

// Calculate takes the current Pacer state and return the wait time until the next try.
func (c *Default) Calculate(state State) time.Duration {
    if t, ok := IsRetryAfter(state.LastError); ok {
        if t < c.minSleep {
            return c.minSleep
        }
        return t
    }

    if state.ConsecutiveRetries > 0 {
        sleepTime := c.maxSleep
        if c.attackConstant != 0 {
            sleepTime = (state.SleepTime << c.attackConstant) / ((1 << c.attackConstant) - 1)
        }
        if sleepTime > c.maxSleep {
            sleepTime = c.maxSleep
        }
        return sleepTime
    }
    sleepTime := (state.SleepTime<<c.decayConstant - state.SleepTime) >> c.decayConstant
    if sleepTime < c.minSleep {
        sleepTime = c.minSleep
    }
    return sleepTime
}

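A quick illustration of Default's numbers, using the API defined above: with attackConstant 1 a retry doubles the sleep, with decayConstant 2 a success decays it by 3/4, and minSleep/maxSleep clamp both directions. The snippet is standalone except for the import path, which is an assumption; point it at wherever this package lives in your checkout:

package main

import (
	"fmt"
	"time"

	"github.com/ncw/rclone/lib/pacer" // assumed path, adjust as needed
)

func main() {
	c := pacer.NewDefault(pacer.MinSleep(time.Millisecond), pacer.MaxSleep(time.Second))
	// Retry: (100ms << 1) / (2 - 1) = 200ms
	fmt.Println(c.Calculate(pacer.State{SleepTime: 100 * time.Millisecond, ConsecutiveRetries: 1}))
	// Success: (100ms<<2 - 100ms) >> 2 = 75ms
	fmt.Println(c.Calculate(pacer.State{SleepTime: 100 * time.Millisecond}))
	// Success at the floor: 0.75ms would be below minSleep, so clamp to 1ms
	fmt.Println(c.Calculate(pacer.State{SleepTime: time.Millisecond}))
}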
// AmazonCloudDrive is a specialized pacer for Amazon Drive
//
// It implements a truncated exponential backoff strategy with randomization.
// Normally operations are paced at the interval set with SetMinSleep. On errors
// the sleep timer is set to 0..2**retries seconds.
//
// See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
type AmazonCloudDrive struct {
    minSleep time.Duration // minimum sleep time
}

// AmazonCloudDriveOption is the interface implemented by all options for the AmazonCloudDrive Calculator
type AmazonCloudDriveOption interface {
    ApplyAmazonCloudDrive(*AmazonCloudDrive)
}

// NewAmazonCloudDrive returns a new AmazonCloudDrive Calculator with default values
func NewAmazonCloudDrive(opts ...AmazonCloudDriveOption) *AmazonCloudDrive {
    c := &AmazonCloudDrive{
        minSleep: 10 * time.Millisecond,
    }
    c.Update(opts...)
    return c
}

// Update applies the Calculator options.
func (c *AmazonCloudDrive) Update(opts ...AmazonCloudDriveOption) {
    for _, opt := range opts {
        opt.ApplyAmazonCloudDrive(c)
    }
}

// ApplyAmazonCloudDrive updates the value on the Calculator
func (o MinSleep) ApplyAmazonCloudDrive(c *AmazonCloudDrive) {
    c.minSleep = time.Duration(o)
}

// Calculate takes the current Pacer state and return the wait time until the next try.
func (c *AmazonCloudDrive) Calculate(state State) time.Duration {
    if t, ok := IsRetryAfter(state.LastError); ok {
        if t < c.minSleep {
            return c.minSleep
        }
        return t
    }

    consecutiveRetries := state.ConsecutiveRetries
    if consecutiveRetries == 0 {
        return c.minSleep
    }
    if consecutiveRetries > 9 {
        consecutiveRetries = 9
    }
    // consecutiveRetries starts at 1 so
    // maxSleep is 2**(consecutiveRetries-1) seconds
    maxSleep := time.Second << uint(consecutiveRetries-1)
    // actual sleep is random from 0..maxSleep
    sleepTime := time.Duration(rand.Int63n(int64(maxSleep)))
    if sleepTime < c.minSleep {
        sleepTime = c.minSleep
    }
    return sleepTime
}

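The maths above in miniature, as a standalone sketch (not rclone code): the n-th consecutive retry draws a sleep uniformly from [0, 2**(n-1)) seconds, floored at minSleep and with n capped at 9, so the average sleep is roughly half the window, which is what TestAmazonCloudDrivePacer's table asserts:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// acdSleep reproduces the randomized backoff formula from Calculate above.
func acdSleep(minSleep time.Duration, consecutiveRetries int) time.Duration {
	if consecutiveRetries == 0 {
		return minSleep
	}
	if consecutiveRetries > 9 {
		consecutiveRetries = 9
	}
	maxSleep := time.Second << uint(consecutiveRetries-1)
	sleepTime := time.Duration(rand.Int63n(int64(maxSleep)))
	if sleepTime < minSleep {
		sleepTime = minSleep
	}
	return sleepTime
}

func main() {
	var sum time.Duration
	for i := 0; i < 1000; i++ {
		sum += acdSleep(time.Millisecond, 3)
	}
	fmt.Println(sum / 1000) // around 2s: half of the 4s window for retry 3
}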
// GoogleDrive is a specialized pacer for Google Drive
//
// It implements a truncated exponential backoff strategy with randomization.
// Normally operations are paced at the interval set with SetMinSleep. On errors
// the sleep timer is set to (2 ^ n) + random_number_milliseconds seconds.
//
// See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
type GoogleDrive struct {
    minSleep time.Duration // minimum sleep time
    burst    int           // number of requests without sleeping
    limiter  *rate.Limiter // rate limiter for the minSleep
}

// GoogleDriveOption is the interface implemented by all options for the GoogleDrive Calculator
type GoogleDriveOption interface {
    ApplyGoogleDrive(*GoogleDrive)
}

// NewGoogleDrive returns a new GoogleDrive Calculator with default values
func NewGoogleDrive(opts ...GoogleDriveOption) *GoogleDrive {
    c := &GoogleDrive{
        minSleep: 10 * time.Millisecond,
        burst:    1,
    }
    c.Update(opts...)
    return c
}

// Update applies the Calculator options.
func (c *GoogleDrive) Update(opts ...GoogleDriveOption) {
    for _, opt := range opts {
        opt.ApplyGoogleDrive(c)
    }
    if c.burst <= 0 {
        c.burst = 1
    }
    c.limiter = rate.NewLimiter(rate.Every(c.minSleep), c.burst)
}

// ApplyGoogleDrive updates the value on the Calculator
func (o MinSleep) ApplyGoogleDrive(c *GoogleDrive) {
    c.minSleep = time.Duration(o)
}

// ApplyGoogleDrive updates the value on the Calculator
func (o Burst) ApplyGoogleDrive(c *GoogleDrive) {
    c.burst = int(o)
}

// Calculate takes the current Pacer state and return the wait time until the next try.
func (c *GoogleDrive) Calculate(state State) time.Duration {
    if t, ok := IsRetryAfter(state.LastError); ok {
        if t < c.minSleep {
            return c.minSleep
        }
        return t
    }

    consecutiveRetries := state.ConsecutiveRetries
    if consecutiveRetries == 0 {
        return c.limiter.Reserve().Delay()
    }
    if consecutiveRetries > 5 {
        consecutiveRetries = 5
    }
    // consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
    // maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
    return time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Second)))
}

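A standalone sketch of the drive backoff above (illustrative, not rclone code): retry n, capped at 5, sleeps 2**(n-1) seconds plus up to one second of uniform jitter, so the expected sleep is 2**(n-1)s + 500ms, the values TestGoogleDrivePacer averages towards:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// driveSleep reproduces the retry branch of Calculate above;
// it assumes consecutiveRetries >= 1.
func driveSleep(consecutiveRetries int) time.Duration {
	if consecutiveRetries > 5 {
		consecutiveRetries = 5
	}
	return time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Second)))
}

func main() {
	var sum time.Duration
	for i := 0; i < 1000; i++ {
		sum += driveSleep(3)
	}
	fmt.Println(sum / 1000) // around 4.5s: 2**2 seconds plus ~500ms of jitter
}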
// S3 implements a pacer compatible with our expectations of S3, where it tries to not
// delay at all between successful calls, but backs off in the default fashion in response
// to any errors.
// The assumption is that errors should be exceedingly rare (S3 seems to have largely solved
// the sort of stability questions rclone is likely to run into), and in the happy case
// it can handle calls with no delays between them.
//
// Basically defaultPacer, but with some handling of sleepTime going to/from 0ms
type S3 struct {
    minSleep       time.Duration // minimum sleep time
    maxSleep       time.Duration // maximum sleep time
    decayConstant  uint          // decay constant
    attackConstant uint          // attack constant
}

// S3Option is the interface implemented by all options for the S3 Calculator
type S3Option interface {
    ApplyS3(*S3)
}

// NewS3 returns a new S3 Calculator with default values
func NewS3(opts ...S3Option) *S3 {
    c := &S3{
        maxSleep:       2 * time.Second,
        decayConstant:  2,
        attackConstant: 1,
    }
    c.Update(opts...)
    return c
}

// Update applies the Calculator options.
func (c *S3) Update(opts ...S3Option) {
    for _, opt := range opts {
        opt.ApplyS3(c)
    }
}

// ApplyS3 updates the value on the Calculator
func (o MaxSleep) ApplyS3(c *S3) {
    c.maxSleep = time.Duration(o)
}

// ApplyS3 updates the value on the Calculator
func (o MinSleep) ApplyS3(c *S3) {
    c.minSleep = time.Duration(o)
}

// ApplyS3 updates the value on the Calculator
func (o DecayConstant) ApplyS3(c *S3) {
    c.decayConstant = uint(o)
}

// ApplyS3 updates the value on the Calculator
func (o AttackConstant) ApplyS3(c *S3) {
    c.attackConstant = uint(o)
}

// Calculate takes the current Pacer state and return the wait time until the next try.
func (c *S3) Calculate(state State) time.Duration {
    if t, ok := IsRetryAfter(state.LastError); ok {
        if t < c.minSleep {
            return c.minSleep
        }
        return t
    }

    if state.ConsecutiveRetries > 0 {
        if c.attackConstant == 0 {
            return c.maxSleep
        }
        if state.SleepTime == 0 {
            return c.minSleep
        }
        sleepTime := (state.SleepTime << c.attackConstant) / ((1 << c.attackConstant) - 1)
        if sleepTime > c.maxSleep {
            sleepTime = c.maxSleep
        }
        return sleepTime
    }
    sleepTime := (state.SleepTime<<c.decayConstant - state.SleepTime) >> c.decayConstant
    if sleepTime < c.minSleep {
        sleepTime = 0
    }
    return sleepTime
}
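A short usage sketch of the S3 calculator defined above: successful calls pay no delay at all, the first error jumps straight to minSleep, further errors double the sleep, and recovery decays it back down to zero. As before, the import path is an assumption to adjust for your checkout:

package main

import (
	"fmt"
	"time"

	"github.com/ncw/rclone/lib/pacer" // assumed path, adjust as needed
)

func main() {
	c := pacer.NewS3(pacer.MinSleep(10*time.Millisecond), pacer.MaxSleep(time.Second))
	fmt.Println(c.Calculate(pacer.State{}))                                                        // 0: happy path, no sleep
	fmt.Println(c.Calculate(pacer.State{ConsecutiveRetries: 1}))                                   // 10ms: first failure starts at minSleep
	fmt.Println(c.Calculate(pacer.State{SleepTime: 10 * time.Millisecond, ConsecutiveRetries: 1})) // 20ms: doubled on the next failure
	fmt.Println(c.Calculate(pacer.State{SleepTime: 10 * time.Millisecond}))                        // 0: decay below minSleep snaps to zero
}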
Some files were not shown because too many files have changed in this diff.