
Break the fs package up into smaller parts.

The purpose of this is to make it easier to maintain and eventually to
allow the rclone backends to be re-used in other projects without
having to use the rclone configuration system.

The new code layout is documented in CONTRIBUTING.
Nick Craig-Wood
2018-01-12 16:30:54 +00:00
parent 92624bbbf1
commit 11da2a6c9b
183 changed files with 5749 additions and 5063 deletions
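For orientation, here is the new layout expressed as import paths; a minimal sketch assembled from the package clauses in the diffs below (the blank imports exist only to make it build):

package layout

import (
	_ "github.com/ncw/rclone/fs"                    // core types: Object, SizeSuffix, Config
	_ "github.com/ncw/rclone/fs/accounting"         // accounting and limiting reader
	_ "github.com/ncw/rclone/fs/asyncreader"        // asynchronous read-ahead reader
	_ "github.com/ncw/rclone/fs/config"             // config file handling, Obscure/Reveal
	_ "github.com/ncw/rclone/fs/config/configflags" // global command line flags
	_ "github.com/ncw/rclone/fs/config/flags"       // pflag wrappers with env var overrides
	_ "github.com/ncw/rclone/fs/driveletter"        // Windows drive letter helper
	_ "github.com/ncw/rclone/fs/filter"             // file filtering
	_ "github.com/ncw/rclone/lib/readers"           // low level reader helpers
)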

View File

@@ -1,6 +1,5 @@
// Accounting and limiting reader
package fs
// Package accounting provides an accounting and limiting reader
package accounting
import (
"bytes"
@@ -12,6 +11,8 @@ import (
"time"
"github.com/VividCortex/ewma"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/asyncreader"
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.6
"golang.org/x/time/rate"
)
@@ -24,31 +25,36 @@ var (
prevTokenBucket = tokenBucket
bwLimitToggledOff = false
currLimitMu sync.Mutex // protects changes to the timeslot
currLimit BwTimeSlot
currLimit fs.BwTimeSlot
)
func init() {
// Set the function pointer up in fs
fs.CountError = Stats.Error
}
const maxBurstSize = 1 * 1024 * 1024 // must be bigger than the biggest request
// make a new empty token bucket with the bandwidth given
func newTokenBucket(bandwidth SizeSuffix) *rate.Limiter {
func newTokenBucket(bandwidth fs.SizeSuffix) *rate.Limiter {
newTokenBucket := rate.NewLimiter(rate.Limit(bandwidth), maxBurstSize)
// empty the bucket
err := newTokenBucket.WaitN(context.Background(), maxBurstSize)
if err != nil {
Errorf(nil, "Failed to empty token bucket: %v", err)
fs.Errorf(nil, "Failed to empty token bucket: %v", err)
}
return newTokenBucket
}
// Start the token bucket if necessary
func startTokenBucket() {
// StartTokenBucket starts the token bucket if necessary
func StartTokenBucket() {
currLimitMu.Lock()
currLimit := bwLimit.LimitAt(time.Now())
currLimit := fs.Config.BwLimit.LimitAt(time.Now())
currLimitMu.Unlock()
if currLimit.bandwidth > 0 {
tokenBucket = newTokenBucket(currLimit.bandwidth)
Infof(nil, "Starting bandwidth limiter at %vBytes/s", &currLimit.bandwidth)
if currLimit.Bandwidth > 0 {
tokenBucket = newTokenBucket(currLimit.Bandwidth)
fs.Infof(nil, "Starting bandwidth limiter at %vBytes/s", &currLimit.Bandwidth)
// Start the SIGUSR2 signal handler to toggle bandwidth.
// This function does nothing on Windows systems.
@@ -56,21 +62,21 @@ func startTokenBucket() {
}
}
// startTokenTicker creates a ticker to update the bandwidth limiter every minute.
func startTokenTicker() {
// StartTokenTicker creates a ticker to update the bandwidth limiter every minute.
func StartTokenTicker() {
// If the timetable has a single entry or was not specified, we don't need
// a ticker to update the bandwidth.
if len(bwLimit) <= 1 {
if len(fs.Config.BwLimit) <= 1 {
return
}
ticker := time.NewTicker(time.Minute)
go func() {
for range ticker.C {
limitNow := bwLimit.LimitAt(time.Now())
limitNow := fs.Config.BwLimit.LimitAt(time.Now())
currLimitMu.Lock()
if currLimit.bandwidth != limitNow.bandwidth {
if currLimit.Bandwidth != limitNow.Bandwidth {
tokenBucketMu.Lock()
// If bwlimit is toggled off, the change should only
@@ -84,17 +90,17 @@ func startTokenTicker() {
}
// Set new bandwidth. If unlimited, set tokenbucket to nil.
if limitNow.bandwidth > 0 {
*targetBucket = newTokenBucket(limitNow.bandwidth)
if limitNow.Bandwidth > 0 {
*targetBucket = newTokenBucket(limitNow.Bandwidth)
if bwLimitToggledOff {
Logf(nil, "Scheduled bandwidth change. "+
"Limit will be set to %vBytes/s when toggled on again.", &limitNow.bandwidth)
fs.Logf(nil, "Scheduled bandwidth change. "+
"Limit will be set to %vBytes/s when toggled on again.", &limitNow.Bandwidth)
} else {
Logf(nil, "Scheduled bandwidth change. Limit set to %vBytes/s", &limitNow.bandwidth)
fs.Logf(nil, "Scheduled bandwidth change. Limit set to %vBytes/s", &limitNow.Bandwidth)
}
} else {
*targetBucket = nil
Logf(nil, "Scheduled bandwidth change. Bandwidth limits disabled")
fs.Logf(nil, "Scheduled bandwidth change. Bandwidth limits disabled")
}
currLimit = limitNow
@@ -117,7 +123,7 @@ type inProgress struct {
// newInProgress makes a new inProgress object
func newInProgress() *inProgress {
return &inProgress{
m: make(map[string]*Account, Config.Transfers),
m: make(map[string]*Account, fs.Config.Transfers),
}
}
@@ -181,8 +187,8 @@ type StatsInfo struct {
// NewStats creates an initialised StatsInfo
func NewStats() *StatsInfo {
return &StatsInfo{
checking: make(stringSet, Config.Checkers),
transferring: make(stringSet, Config.Transfers),
checking: make(stringSet, fs.Config.Checkers),
transferring: make(stringSet, fs.Config.Transfers),
start: time.Now(),
inProgress: newInProgress(),
}
@@ -201,7 +207,7 @@ func (s *StatsInfo) String() string {
dtRounded := dt - (dt % (time.Second / 10))
buf := &bytes.Buffer{}
if Config.DataRateUnit == "bits" {
if fs.Config.DataRateUnit == "bits" {
speed = speed * 8
}
@@ -212,7 +218,7 @@ Checks: %10d
Transferred: %10d
Elapsed time: %10v
`,
SizeSuffix(s.bytes).Unit("Bytes"), SizeSuffix(speed).Unit(strings.Title(Config.DataRateUnit)+"/s"),
fs.SizeSuffix(s.bytes).Unit("Bytes"), fs.SizeSuffix(speed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
s.errors,
s.checks,
s.transfers,
@@ -228,7 +234,7 @@ Elapsed time: %10v
// Log outputs the StatsInfo to the log
func (s *StatsInfo) Log() {
LogLevelPrintf(Config.StatsLogLevel, nil, "%v\n", s)
fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "%v\n", s)
}
// Bytes updates the stats for bytes bytes
@@ -375,7 +381,7 @@ func NewAccountSizeName(in io.ReadCloser, size int64, name string) *Account {
}
// NewAccount makes a Account reader for an object
func NewAccount(in io.ReadCloser, obj Object) *Account {
func NewAccount(in io.ReadCloser, obj fs.Object) *Account {
return NewAccountSizeName(in, obj.Size(), obj.Remote())
}
@@ -383,16 +389,16 @@ func NewAccount(in io.ReadCloser, obj Object) *Account {
func (acc *Account) WithBuffer() *Account {
acc.withBuf = true
var buffers int
if acc.size >= int64(Config.BufferSize) || acc.size == -1 {
buffers = int(int64(Config.BufferSize) / asyncBufferSize)
if acc.size >= int64(fs.Config.BufferSize) || acc.size == -1 {
buffers = int(int64(fs.Config.BufferSize) / asyncreader.BufferSize)
} else {
buffers = int(acc.size / asyncBufferSize)
buffers = int(acc.size / asyncreader.BufferSize)
}
// On big files add a buffer
if buffers > 0 {
in, err := newAsyncReader(acc.in, buffers)
in, err := asyncreader.New(acc.in, buffers)
if err != nil {
Errorf(acc.name, "Failed to make buffer: %v", err)
fs.Errorf(acc.name, "Failed to make buffer: %v", err)
} else {
acc.in = in
}
@@ -409,7 +415,7 @@ func (acc *Account) GetReader() io.ReadCloser {
// StopBuffering stops the async buffer doing any more buffering
func (acc *Account) StopBuffering() {
if asyncIn, ok := acc.in.(*asyncReader); ok {
if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
asyncIn.Abandon()
}
}
@@ -484,7 +490,7 @@ func (acc *Account) read(in io.Reader, p []byte) (n int, err error) {
if tokenBucket != nil {
tbErr := tokenBucket.WaitN(context.Background(), n)
if tbErr != nil {
Errorf(nil, "Token bucket error: %v", err)
fs.Errorf(nil, "Token bucket error: %v", err)
}
}
tokenBucketMu.Unlock()
@@ -572,14 +578,14 @@ func (acc *Account) String() string {
}
}
name := []rune(acc.name)
if Config.StatsFileNameLength > 0 {
if len(name) > Config.StatsFileNameLength {
where := len(name) - Config.StatsFileNameLength
if fs.Config.StatsFileNameLength > 0 {
if len(name) > fs.Config.StatsFileNameLength {
where := len(name) - fs.Config.StatsFileNameLength
name = append([]rune{'.', '.', '.'}, name[where:]...)
}
}
if Config.DataRateUnit == "bits" {
if fs.Config.DataRateUnit == "bits" {
cur = cur * 8
}
@@ -588,12 +594,12 @@ func (acc *Account) String() string {
percentageDone = int(100 * float64(a) / float64(b))
}
done := fmt.Sprintf("%2d%% /%s", percentageDone, SizeSuffix(b))
done := fmt.Sprintf("%2d%% /%s", percentageDone, fs.SizeSuffix(b))
return fmt.Sprintf("%45s: %s, %s/s, %s",
string(name),
done,
SizeSuffix(cur),
fs.SizeSuffix(cur),
etas,
)
}
@@ -633,10 +639,10 @@ func (a *accountStream) Read(p []byte) (n int, err error) {
// AccountByPart turns off whole file accounting
//
// Returns the current account or nil if not found
func AccountByPart(obj Object) *Account {
func AccountByPart(obj fs.Object) *Account {
acc := Stats.inProgress.get(obj.Remote())
if acc == nil {
Debugf(obj, "Didn't find object to account part transfer")
fs.Debugf(obj, "Didn't find object to account part transfer")
return nil
}
acc.disableWholeFileAccounting()
@@ -647,7 +653,7 @@ func AccountByPart(obj Object) *Account {
//
// It disables the whole file counter and returns an io.Reader to wrap
// a segment of the transfer.
func AccountPart(obj Object, in io.Reader) io.Reader {
func AccountPart(obj fs.Object, in io.Reader) io.Reader {
acc := AccountByPart(obj)
if acc == nil {
return in
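A minimal usage sketch for the relocated package, using the API shown above and assuming Account keeps the Close method it had in the old fs package (the file name and size here are made up):

package main

import (
	"io"
	"io/ioutil"
	"strings"

	"github.com/ncw/rclone/fs/accounting"
)

func main() {
	// Wrap a reader so the global Stats track its progress.
	in := ioutil.NopCloser(strings.NewReader("some file contents"))
	acc := accounting.NewAccountSizeName(in, 18, "example.txt")

	// Reads through the Account are counted, and throttled when a
	// token bucket is active.
	_, _ = io.Copy(ioutil.Discard, acc)
	_ = acc.Close()

	// Print the accumulated transfer statistics.
	accounting.Stats.Log()
}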

View File

@@ -3,7 +3,7 @@
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package fs
package accounting
// startSignalHandler() is Unix specific and does nothing under non-Unix
// platforms.

View File

@@ -3,12 +3,14 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package fs
package accounting
import (
"os"
"os/signal"
"syscall"
"github.com/ncw/rclone/fs"
)
// startSignalHandler() sets a signal handler to catch SIGUSR2 and toggle throttling.
@@ -28,7 +30,7 @@ func startSignalHandler() {
s = "enabled"
}
tokenBucketMu.Unlock()
Logf(nil, "Bandwidth limit %s by user", s)
fs.Logf(nil, "Bandwidth limit %s by user", s)
}
}()
}
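The handler above follows the standard os/signal pattern. A self-contained sketch of the same toggle mechanism (illustrative only, not the rclone code itself; Unix only because of SIGUSR2):

package main

import (
	"log"
	"os"
	"os/signal"
	"sync"
	"syscall"
)

func main() {
	var (
		mu       sync.Mutex
		limiting = true
	)
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGUSR2)
	go func() {
		for range c {
			mu.Lock()
			limiting = !limiting
			state := "disabled"
			if limiting {
				state = "enabled"
			}
			mu.Unlock()
			log.Printf("Bandwidth limit %s by user", state)
		}
	}()
	select {} // block; send SIGUSR2 to this process to toggle
}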

View File

@@ -1,14 +1,18 @@
package fs
// Package asyncreader provides an asynchronous reader which reads
// independently of write
package asyncreader
import (
"io"
"sync"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
const (
asyncBufferSize = 1024 * 1024
// BufferSize is the default size of the async buffer
BufferSize = 1024 * 1024
softStartInitial = 4 * 1024
)
@@ -18,11 +22,11 @@ var asyncBufferPool = sync.Pool{
var errorStreamAbandoned = errors.New("stream abandoned")
// asyncReader will do async read-ahead from the input reader
// AsyncReader will do async read-ahead from the input reader
// and make the data available as an io.Reader.
// This should be fully transparent, except that once an error
// has been returned from the Reader, it will not recover.
type asyncReader struct {
type AsyncReader struct {
in io.ReadCloser // Input reader
ready chan *buffer // Buffers ready to be handed to the reader
token chan struct{} // Tokens which allow a buffer to be taken
@@ -36,25 +40,25 @@ type asyncReader struct {
mu sync.Mutex // lock for Read/WriteTo/Abandon/Close
}
// newAsyncReader returns a reader that will asynchronously read from
// the supplied Reader into a number of buffers each of size asyncBufferSize
// New returns a reader that will asynchronously read from
// the supplied Reader into a number of buffers each of size BufferSize
// It will start reading from the input at once, maybe even before this
// function has returned.
// The input can be read from the returned reader.
// When done use Close to release the buffers and close the supplied input.
func newAsyncReader(rd io.ReadCloser, buffers int) (*asyncReader, error) {
func New(rd io.ReadCloser, buffers int) (*AsyncReader, error) {
if buffers <= 0 {
return nil, errors.New("number of buffers too small")
}
if rd == nil {
return nil, errors.New("nil reader supplied")
}
a := &asyncReader{}
a := &AsyncReader{}
a.init(rd, buffers)
return a, nil
}
func (a *asyncReader) init(rd io.ReadCloser, buffers int) {
func (a *AsyncReader) init(rd io.ReadCloser, buffers int) {
a.in = rd
a.ready = make(chan *buffer, buffers)
a.token = make(chan struct{}, buffers)
@@ -78,7 +82,7 @@ func (a *asyncReader) init(rd io.ReadCloser, buffers int) {
select {
case <-a.token:
b := a.getBuffer()
if a.size < asyncBufferSize {
if a.size < BufferSize {
b.buf = b.buf[:a.size]
a.size <<= 1
}
@@ -95,19 +99,19 @@ func (a *asyncReader) init(rd io.ReadCloser, buffers int) {
}
// return the buffer to the pool (clearing it)
func (a *asyncReader) putBuffer(b *buffer) {
func (a *AsyncReader) putBuffer(b *buffer) {
b.clear()
asyncBufferPool.Put(b)
}
// get a buffer from the pool
func (a *asyncReader) getBuffer() *buffer {
func (a *AsyncReader) getBuffer() *buffer {
b := asyncBufferPool.Get().(*buffer)
return b
}
// fill swaps in the next buffer of data, blocking until one is available.
func (a *asyncReader) fill() (err error) {
func (a *AsyncReader) fill() (err error) {
if a.cur.isEmpty() {
if a.cur != nil {
a.putBuffer(a.cur)
@@ -128,7 +132,7 @@ func (a *asyncReader) fill() (err error) {
}
// Read will return the next available data.
func (a *asyncReader) Read(p []byte) (n int, err error) {
func (a *AsyncReader) Read(p []byte) (n int, err error) {
a.mu.Lock()
defer a.mu.Unlock()
@@ -153,7 +157,7 @@ func (a *asyncReader) Read(p []byte) (n int, err error) {
// WriteTo writes data to w until there's no more data to write or when an error occurs.
// The return value n is the number of bytes written.
// Any error encountered during the write is also returned.
func (a *asyncReader) WriteTo(w io.Writer) (n int64, err error) {
func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
a.mu.Lock()
defer a.mu.Unlock()
@@ -177,8 +181,8 @@ func (a *asyncReader) WriteTo(w io.Writer) (n int64, err error) {
}
// Abandon will ensure that the underlying async reader is shut down.
// It will NOT close the input supplied on newAsyncReader.
func (a *asyncReader) Abandon() {
// It will NOT close the input supplied on New.
func (a *AsyncReader) Abandon() {
select {
case <-a.exit:
// Do nothing if reader routine already exited
@@ -202,8 +206,8 @@ func (a *asyncReader) Abandon() {
}
// Close will ensure that the underlying async reader is shut down.
// It will also close the input supplied on newAsyncReader.
func (a *asyncReader) Close() (err error) {
// It will also close the input supplied on New.
func (a *AsyncReader) Close() (err error) {
a.Abandon()
if a.closed {
return nil
@@ -223,7 +227,7 @@ type buffer struct {
func newBuffer() *buffer {
return &buffer{
buf: make([]byte, asyncBufferSize),
buf: make([]byte, BufferSize),
err: nil,
}
}
@@ -252,7 +256,7 @@ func (b *buffer) isEmpty() bool {
// Any error encountered during the read is returned.
func (b *buffer) read(rd io.Reader) error {
var n int
n, b.err = ReadFill(rd, b.buf)
n, b.err = readers.ReadFill(rd, b.buf)
b.buf = b.buf[0:n]
b.offset = 0
return b.err
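A short usage sketch for the renamed API, using only what the diff exports (New, the io.Reader behaviour and Close):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/ncw/rclone/fs/asyncreader"
)

func main() {
	src := ioutil.NopCloser(bytes.NewBufferString("hello async world"))

	// Read ahead into up to 4 buffers of asyncreader.BufferSize each.
	ar, err := asyncreader.New(src, 4)
	if err != nil {
		panic(err)
	}
	defer func() { _ = ar.Close() }() // Close also closes src

	data, err := ioutil.ReadAll(ar)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", data)
}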

View File

@@ -1,4 +1,4 @@
package fs
package asyncreader
import (
"bufio"
@@ -17,7 +17,7 @@ import (
func TestAsyncReader(t *testing.T) {
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
ar, err := newAsyncReader(buf, 4)
ar, err := New(buf, 4)
require.NoError(t, err)
var dst = make([]byte, 100)
@@ -42,7 +42,7 @@ func TestAsyncReader(t *testing.T) {
// Test Close without reading everything
buf = ioutil.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
ar, err = newAsyncReader(buf, 4)
ar, err = New(buf, 4)
require.NoError(t, err)
err = ar.Close()
require.NoError(t, err)
@@ -51,7 +51,7 @@ func TestAsyncReader(t *testing.T) {
func TestAsyncWriteTo(t *testing.T) {
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
ar, err := newAsyncReader(buf, 4)
ar, err := New(buf, 4)
require.NoError(t, err)
var dst = &bytes.Buffer{}
@@ -70,14 +70,14 @@ func TestAsyncWriteTo(t *testing.T) {
func TestAsyncReaderErrors(t *testing.T) {
// test nil reader
_, err := newAsyncReader(nil, 4)
_, err := New(nil, 4)
require.Error(t, err)
// invalid buffer number
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
_, err = newAsyncReader(buf, 0)
_, err = New(buf, 0)
require.Error(t, err)
_, err = newAsyncReader(buf, -1)
_, err = New(buf, -1)
require.Error(t, err)
}
@@ -157,9 +157,9 @@ func TestAsyncReaderSizes(t *testing.T) {
bufsize := bufsizes[k]
read := readmaker.fn(strings.NewReader(text))
buf := bufio.NewReaderSize(read, bufsize)
ar, _ := newAsyncReader(ioutil.NopCloser(buf), l)
ar, _ := New(ioutil.NopCloser(buf), l)
s := bufreader.fn(ar)
// "timeout" expects the Reader to recover, asyncReader does not.
// "timeout" expects the Reader to recover, AsyncReader does not.
if s != text && readmaker.name != "timeout" {
t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
readmaker.name, bufreader.name, bufsize, text, s)
@@ -196,14 +196,14 @@ func TestAsyncReaderWriteTo(t *testing.T) {
bufsize := bufsizes[k]
read := readmaker.fn(strings.NewReader(text))
buf := bufio.NewReaderSize(read, bufsize)
ar, _ := newAsyncReader(ioutil.NopCloser(buf), l)
ar, _ := New(ioutil.NopCloser(buf), l)
dst := &bytes.Buffer{}
_, err := ar.WriteTo(dst)
if err != nil && err != io.EOF && err != iotest.ErrTimeout {
t.Fatal("Copy:", err)
}
s := dst.String()
// "timeout" expects the Reader to recover, asyncReader does not.
// "timeout" expects the Reader to recover, AsyncReader does not.
if s != text && readmaker.name != "timeout" {
t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
readmaker.name, bufreader.name, bufsize, text, s)
@@ -243,7 +243,7 @@ func (z *zeroReader) Close() error {
// Test closing and abandoning
func testAsyncReaderClose(t *testing.T, writeto bool) {
zr := &zeroReader{}
a, err := newAsyncReader(zr, 16)
a, err := New(zr, 16)
require.NoError(t, err)
var copyN int64
var copyErr error

fs/bwtimetable.go Normal file (132 lines)
View File

@@ -0,0 +1,132 @@
package fs
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
)
// BwTimeSlot represents a bandwidth configuration at a point in time.
type BwTimeSlot struct {
HHMM int
Bandwidth SizeSuffix
}
// BwTimetable contains all configured time slots.
type BwTimetable []BwTimeSlot
// String returns a printable representation of BwTimetable.
func (x BwTimetable) String() string {
ret := []string{}
for _, ts := range x {
ret = append(ret, fmt.Sprintf("%04.4d,%s", ts.HHMM, ts.Bandwidth.String()))
}
return strings.Join(ret, " ")
}
// Set the bandwidth timetable.
func (x *BwTimetable) Set(s string) error {
// The timetable is formatted as:
// "hh:mm,bandwidth hh:mm,banwidth..." ex: "10:00,10G 11:30,1G 18:00,off"
// If only a single bandwidth identifier is provided, we assume constant bandwidth.
if len(s) == 0 {
return errors.New("empty string")
}
// Single value without time specification.
if !strings.Contains(s, " ") && !strings.Contains(s, ",") {
ts := BwTimeSlot{}
if err := ts.Bandwidth.Set(s); err != nil {
return err
}
ts.HHMM = 0
*x = BwTimetable{ts}
return nil
}
for _, tok := range strings.Split(s, " ") {
tv := strings.Split(tok, ",")
// Format must be HH:MM,BW
if len(tv) != 2 {
return errors.Errorf("invalid time/bandwidth specification: %q", tok)
}
// Basic timespec sanity checking
HHMM := tv[0]
if len(HHMM) != 5 {
return errors.Errorf("invalid time specification (hh:mm): %q", HHMM)
}
hh, err := strconv.Atoi(HHMM[0:2])
if err != nil {
return errors.Errorf("invalid hour in time specification %q: %v", HHMM, err)
}
if hh < 0 || hh > 23 {
return errors.Errorf("invalid hour (must be between 00 and 23): %q", hh)
}
mm, err := strconv.Atoi(HHMM[3:])
if err != nil {
return errors.Errorf("invalid minute in time specification: %q: %v", HHMM, err)
}
if mm < 0 || mm > 59 {
return errors.Errorf("invalid minute (must be between 00 and 59): %q", hh)
}
ts := BwTimeSlot{
HHMM: (hh * 100) + mm,
}
// Bandwidth limit for this time slot.
if err := ts.Bandwidth.Set(tv[1]); err != nil {
return err
}
*x = append(*x, ts)
}
return nil
}
// LimitAt returns a BwTimeSlot for the time requested.
func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot {
// If the timetable is empty, we return an unlimited BwTimeSlot starting at midnight.
if len(x) == 0 {
return BwTimeSlot{HHMM: 0, Bandwidth: -1}
}
HHMM := tt.Hour()*100 + tt.Minute()
// By default, we return the last element in the timetable. This
// satisfies two conditions: 1) If there's only one element it
// will always be selected, and 2) The last element of the table
// will "wrap around" until overriden by an earlier time slot.
// there's only one time slot in the timetable.
ret := x[len(x)-1]
mindif := 0
first := true
// Look for most recent time slot.
for _, ts := range x {
// Ignore the past
if HHMM < ts.HHMM {
continue
}
dif := ((HHMM / 100 * 60) + (HHMM % 100)) - ((ts.HHMM / 100 * 60) + (ts.HHMM % 100))
if first {
mindif = dif
first = false
}
if dif <= mindif {
mindif = dif
ret = ts
}
}
return ret
}
// Type of the value
func (x BwTimetable) Type() string {
return "BwTimetable"
}
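A sketch of exercising the type directly; the timetable string follows the format documented in Set above:

package main

import (
	"fmt"
	"time"

	"github.com/ncw/rclone/fs"
)

func main() {
	var tt fs.BwTimetable
	// 10 MBytes/s from 08:00, unlimited from 18:00 onwards.
	if err := tt.Set("08:00,10M 18:00,off"); err != nil {
		panic(err)
	}
	slot := tt.LimitAt(time.Now())
	fmt.Printf("slot %04d, limit %v\n", slot.HHMM, slot.Bandwidth)
}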

fs/bwtimetable_test.go Normal file (113 lines)
View File

@@ -0,0 +1,113 @@
package fs
import (
"testing"
"time"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interface
var _ pflag.Value = (*BwTimetable)(nil)
func TestBwTimetableSet(t *testing.T) {
for _, test := range []struct {
in string
want BwTimetable
err bool
}{
{"", BwTimetable{}, true},
{"0", BwTimetable{BwTimeSlot{HHMM: 0, Bandwidth: 0}}, false},
{"666", BwTimetable{BwTimeSlot{HHMM: 0, Bandwidth: 666 * 1024}}, false},
{"10:20,666", BwTimetable{BwTimeSlot{HHMM: 1020, Bandwidth: 666 * 1024}}, false},
{
"11:00,333 13:40,666 23:50,10M 23:59,off",
BwTimetable{
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
BwTimeSlot{HHMM: 1340, Bandwidth: 666 * 1024},
BwTimeSlot{HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
BwTimeSlot{HHMM: 2359, Bandwidth: -1},
},
false,
},
{"bad,bad", BwTimetable{}, true},
{"bad bad", BwTimetable{}, true},
{"bad", BwTimetable{}, true},
{"1000X", BwTimetable{}, true},
{"2401,666", BwTimetable{}, true},
{"1061,666", BwTimetable{}, true},
} {
tt := BwTimetable{}
err := tt.Set(test.in)
if test.err {
require.Error(t, err)
} else {
require.NoError(t, err)
}
assert.Equal(t, test.want, tt)
}
}
func TestBwTimetableLimitAt(t *testing.T) {
for _, test := range []struct {
tt BwTimetable
now time.Time
want BwTimeSlot
}{
{
BwTimetable{},
time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
BwTimeSlot{HHMM: 0, Bandwidth: -1},
},
{
BwTimetable{BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024}},
time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
},
{
BwTimetable{
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
BwTimeSlot{HHMM: 2301, Bandwidth: 1024 * 1024},
BwTimeSlot{HHMM: 2350, Bandwidth: -1},
},
time.Date(2017, time.April, 20, 10, 15, 0, 0, time.UTC),
BwTimeSlot{HHMM: 2350, Bandwidth: -1},
},
{
BwTimetable{
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
BwTimeSlot{HHMM: 2301, Bandwidth: 1024 * 1024},
BwTimeSlot{HHMM: 2350, Bandwidth: -1},
},
time.Date(2017, time.April, 20, 11, 0, 0, 0, time.UTC),
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
},
{
BwTimetable{
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
BwTimeSlot{HHMM: 2301, Bandwidth: 1024 * 1024},
BwTimeSlot{HHMM: 2350, Bandwidth: -1},
},
time.Date(2017, time.April, 20, 13, 1, 0, 0, time.UTC),
BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
},
{
BwTimetable{
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
BwTimeSlot{HHMM: 2301, Bandwidth: 1024 * 1024},
BwTimeSlot{HHMM: 2350, Bandwidth: -1},
},
time.Date(2017, time.April, 20, 23, 59, 0, 0, time.UTC),
BwTimeSlot{HHMM: 2350, Bandwidth: -1},
},
} {
slot := test.tt.LimitAt(test.now)
assert.Equal(t, test.want, slot)
}
}

File diff suppressed because it is too large

fs/config/config.go Normal file (1159 lines)

File diff suppressed because it is too large

View File

@@ -3,7 +3,7 @@
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package fs
package config
// attemptCopyGroups tries to keep the group the same, which only makes sense
// for system with user-group-world permission model.

View File

@@ -4,7 +4,7 @@
// +build !solaris,!plan9
package fs
package config
import (
"fmt"

View File

@@ -4,7 +4,7 @@
// +build solaris plan9
package fs
package config
// ReadPassword reads a password with echoing it to the terminal.
func ReadPassword() string {

View File

@@ -1,45 +1,15 @@
package fs
package config
import (
"bytes"
"crypto/rand"
"io/ioutil"
"os"
"testing"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestObscure(t *testing.T) {
for _, test := range []struct {
in string
want string
iv string
}{
{"", "YWFhYWFhYWFhYWFhYWFhYQ", "aaaaaaaaaaaaaaaa"},
{"potato", "YWFhYWFhYWFhYWFhYWFhYXMaGgIlEQ", "aaaaaaaaaaaaaaaa"},
{"potato", "YmJiYmJiYmJiYmJiYmJiYp3gcEWbAw", "bbbbbbbbbbbbbbbb"},
} {
cryptRand = bytes.NewBufferString(test.iv)
got, err := Obscure(test.in)
cryptRand = rand.Reader
assert.NoError(t, err)
assert.Equal(t, test.want, got)
recoveredIn, err := Reveal(got)
assert.NoError(t, err)
assert.Equal(t, test.in, recoveredIn, "not bidirectional")
// Now the Must variants
cryptRand = bytes.NewBufferString(test.iv)
got = MustObscure(test.in)
cryptRand = rand.Reader
assert.Equal(t, test.want, got)
recoveredIn = MustReveal(got)
assert.Equal(t, test.in, recoveredIn, "not bidirectional")
}
}
func TestCRUD(t *testing.T) {
configKey = nil // reset password
// create temp config file
@@ -54,39 +24,47 @@ func TestCRUD(t *testing.T) {
// temporarily adapt configuration
oldOsStdout := os.Stdout
oldConfigFile := configFile
oldConfig := Config
oldConfigPath := ConfigPath
oldConfig := fs.Config
oldConfigData := configData
oldReadLine := ReadLine
os.Stdout = nil
configFile = &path
Config = &ConfigInfo{}
ConfigPath = path
fs.Config = &fs.ConfigInfo{}
configData = nil
defer func() {
os.Stdout = oldOsStdout
configFile = oldConfigFile
ConfigPath = oldConfigPath
ReadLine = oldReadLine
Config = oldConfig
fs.Config = oldConfig
configData = oldConfigData
}()
LoadConfig()
assert.Equal(t, []string{}, configData.GetSectionList())
// Fake a remote
fs.Register(&fs.RegInfo{Name: "config_test_remote"})
// add new remote
i := 0
ReadLine = func() string {
answers := []string{
"local", // type is local
"1", // yes, disable long filenames
"y", // looks good, save
"config_test_remote", // type
"y", // looks good, save
}
i = i + 1
return answers[i-1]
}
NewRemote("test")
assert.Equal(t, []string{"test"}, configData.GetSectionList())
// Reload the config file to workaround this bug
// https://github.com/Unknwon/goconfig/issues/39
configData, err = loadConfigFile()
require.NoError(t, err)
// normal rename, test → asdf
ReadLine = func() string { return "asdf" }
RenameRemote("test")
@@ -226,50 +204,3 @@ func hashedKeyCompare(t *testing.T, a, b string, shouldMatch bool) {
assert.NotEqual(t, k1, k2)
}
}
func TestDumpFlagsString(t *testing.T) {
assert.Equal(t, "", DumpFlags(0).String())
assert.Equal(t, "headers", (DumpHeaders).String())
assert.Equal(t, "headers,bodies", (DumpHeaders | DumpBodies).String())
assert.Equal(t, "headers,bodies,requests,responses,auth,filters", (DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters).String())
assert.Equal(t, "headers,Unknown-0x8000", (DumpHeaders | DumpFlags(0x8000)).String())
}
func TestDumpFlagsSet(t *testing.T) {
for _, test := range []struct {
in string
want DumpFlags
wantErr string
}{
{"", DumpFlags(0), ""},
{"bodies", DumpBodies, ""},
{"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""},
{"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""},
{"headers,bodies,requests,responses,auth,filters", DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters, ""},
{"headers,bodies,unknown,auth", 0, "Unknown dump flag \"unknown\""},
} {
f := DumpFlags(-1)
initial := f
err := f.Set(test.in)
if err != nil {
if test.wantErr == "" {
t.Errorf("Got an error when not expecting one on %q: %v", test.in, err)
} else {
assert.Contains(t, err.Error(), test.wantErr)
}
assert.Equal(t, initial, f, test.want)
} else {
if test.wantErr != "" {
t.Errorf("Got no error when expecting one on %q", test.in)
} else {
assert.Equal(t, test.want, f)
}
}
}
}
func TestDumpFlagsType(t *testing.T) {
f := DumpFlags(0)
assert.Equal(t, "string", f.Type())
}

View File

@@ -3,13 +3,15 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package fs
package config
import (
"os"
"os/user"
"strconv"
"syscall"
"github.com/ncw/rclone/fs"
)
// attemptCopyGroups tries to keep the group the same. User will be the one
@@ -29,7 +31,7 @@ func attemptCopyGroup(fromPath, toPath string) {
}
}
if err = os.Chown(toPath, uid, int(stat.Gid)); err != nil {
Debugf(nil, "Failed to keep previous owner of config file: %v", err)
fs.Debugf(nil, "Failed to keep previous owner of config file: %v", err)
}
}
}

View File

@@ -0,0 +1,162 @@
// Package configflags defines the flags used by rclone. It is
// decoupled into a separate package so it can be replaced.
package configflags
// Options set by command line flags
import (
"log"
"net"
"path/filepath"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/spf13/pflag"
)
var (
// these will get interpreted into fs.Config via SetFlags() below
verbose int
quiet bool
dumpHeaders bool
dumpBodies bool
deleteBefore bool
deleteDuring bool
deleteAfter bool
bindAddr string
disableFeatures string
)
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
// NB defaults which aren't the zero for the type should be set in fs/config.go NewConfig
flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)")
flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible")
flags.DurationVarP(flagSet, &fs.Config.ModifyWindow, "modify-window", "", fs.Config.ModifyWindow, "Max time diff to be considered the same")
flags.IntVarP(flagSet, &fs.Config.Checkers, "checkers", "", fs.Config.Checkers, "Number of checkers to run in parallel.")
flags.IntVarP(flagSet, &fs.Config.Transfers, "transfers", "", fs.Config.Transfers, "Number of file transfers to run in parallel.")
flags.StringVarP(flagSet, &config.ConfigPath, "config", "", config.ConfigPath, "Config file.")
flags.StringVarP(flagSet, &config.CacheDir, "cache-dir", "", config.CacheDir, "Directory rclone will use for caching.")
flags.BoolVarP(flagSet, &fs.Config.CheckSum, "checksum", "c", fs.Config.CheckSum, "Skip based on checksum & size, not mod-time & size")
flags.BoolVarP(flagSet, &fs.Config.SizeOnly, "size-only", "", fs.Config.SizeOnly, "Skip based on size only, not mod-time or checksum")
flags.BoolVarP(flagSet, &fs.Config.IgnoreTimes, "ignore-times", "I", fs.Config.IgnoreTimes, "Don't skip files that match size and time - transfer all files")
flags.BoolVarP(flagSet, &fs.Config.IgnoreExisting, "ignore-existing", "", fs.Config.IgnoreExisting, "Skip all files that exist on destination")
flags.BoolVarP(flagSet, &fs.Config.DryRun, "dry-run", "n", fs.Config.DryRun, "Do a trial run with no permanent changes")
flags.DurationVarP(flagSet, &fs.Config.ConnectTimeout, "contimeout", "", fs.Config.ConnectTimeout, "Connect timeout")
flags.DurationVarP(flagSet, &fs.Config.Timeout, "timeout", "", fs.Config.Timeout, "IO idle timeout")
flags.BoolVarP(flagSet, &dumpHeaders, "dump-headers", "", false, "Dump HTTP bodies - may contain sensitive info")
flags.BoolVarP(flagSet, &dumpBodies, "dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
flags.BoolVarP(flagSet, &fs.Config.InsecureSkipVerify, "no-check-certificate", "", fs.Config.InsecureSkipVerify, "Do not verify the server SSL certificate. Insecure.")
flags.BoolVarP(flagSet, &fs.Config.AskPassword, "ask-password", "", fs.Config.AskPassword, "Allow prompt for password for encrypted configuration.")
flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transfering")
flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer (default)")
flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transfering")
flags.BoolVarP(flagSet, &fs.Config.TrackRenames, "track-renames", "", fs.Config.TrackRenames, "When synchronizing, track file renames and do a server side move if possible")
flags.IntVarP(flagSet, &fs.Config.LowLevelRetries, "low-level-retries", "", fs.Config.LowLevelRetries, "Number of low level retries to do.")
flags.BoolVarP(flagSet, &fs.Config.UpdateOlder, "update", "u", fs.Config.UpdateOlder, "Skip files that are newer on the destination.")
flags.BoolVarP(flagSet, &fs.Config.NoGzip, "no-gzip-encoding", "", fs.Config.NoGzip, "Don't set Accept-Encoding: gzip.")
flags.IntVarP(flagSet, &fs.Config.MaxDepth, "max-depth", "", fs.Config.MaxDepth, "If set limits the recursion depth to this.")
flags.BoolVarP(flagSet, &fs.Config.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.")
flags.BoolVarP(flagSet, &fs.Config.IgnoreChecksum, "ignore-checksum", "", fs.Config.IgnoreChecksum, "Skip post copy check of checksums.")
flags.BoolVarP(flagSet, &fs.Config.NoTraverse, "no-traverse", "", fs.Config.NoTraverse, "Don't traverse destination file system on copy.")
flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix for use with --backup-dir.")
flags.BoolVarP(flagSet, &fs.Config.UseListR, "fast-list", "", fs.Config.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.")
flags.Float64VarP(flagSet, &fs.Config.TPSLimit, "tpslimit", "", fs.Config.TPSLimit, "Limit HTTP transactions per second to this.")
flags.IntVarP(flagSet, &fs.Config.TPSLimitBurst, "tpslimit-burst", "", fs.Config.TPSLimitBurst, "Max burst of transactions for --tpslimit.")
flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.")
flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features. Use help to see a list.")
flags.StringVarP(flagSet, &fs.Config.UserAgent, "user-agent", "", fs.Config.UserAgent, "Set the user-agent to a specified string. The default is rclone/ version")
flags.BoolVarP(flagSet, &fs.Config.Immutable, "immutable", "", fs.Config.Immutable, "Do not modify files. Fail if existing files have been modified.")
flags.BoolVarP(flagSet, &fs.Config.AutoConfirm, "auto-confirm", "", fs.Config.AutoConfirm, "If enabled, do not request console confirmation.")
flags.IntVarP(flagSet, &fs.Config.StatsFileNameLength, "stats-file-name-length", "", fs.Config.StatsFileNameLength, "Max file name length in stats. 0 for no limit")
flags.FVarP(flagSet, &fs.Config.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
flags.FVarP(flagSet, &fs.Config.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
flags.FVarP(flagSet, &fs.Config.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
flags.FVarP(flagSet, &fs.Config.BufferSize, "buffer-size", "", "Buffer size when copying files.")
flags.FVarP(flagSet, &fs.Config.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
}
// SetFlags converts any flags into config which weren't straightforward
func SetFlags() {
fs.Config.LogLevel = fs.LogLevelNotice
if verbose >= 2 {
fs.Config.LogLevel = fs.LogLevelDebug
} else if verbose >= 1 {
fs.Config.LogLevel = fs.LogLevelInfo
}
if quiet {
if verbose > 0 {
log.Fatalf("Can't set -v and -q")
}
fs.Config.LogLevel = fs.LogLevelError
}
logLevelFlag := pflag.Lookup("log-level")
if logLevelFlag != nil && logLevelFlag.Changed {
if verbose > 0 {
log.Fatalf("Can't set -v and --log-level")
}
if quiet {
log.Fatalf("Can't set -q and --log-level")
}
}
if dumpHeaders {
fs.Config.Dump |= fs.DumpHeaders
fs.Infof(nil, "--dump-headers is obsolete - please use --dump headers instead")
}
if dumpBodies {
fs.Config.Dump |= fs.DumpBodies
fs.Infof(nil, "--dump-bodies is obsolete - please use --dump bodies instead")
}
switch {
case deleteBefore && (deleteDuring || deleteAfter),
deleteDuring && deleteAfter:
log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`)
case deleteBefore:
fs.Config.DeleteMode = fs.DeleteModeBefore
case deleteDuring:
fs.Config.DeleteMode = fs.DeleteModeDuring
case deleteAfter:
fs.Config.DeleteMode = fs.DeleteModeAfter
default:
fs.Config.DeleteMode = fs.DeleteModeDefault
}
if fs.Config.IgnoreSize && fs.Config.SizeOnly {
log.Fatalf(`Can't use --size-only and --ignore-size together.`)
}
if fs.Config.Suffix != "" && fs.Config.BackupDir == "" {
log.Fatalf(`Can only use --suffix with --backup-dir.`)
}
if bindAddr != "" {
addrs, err := net.LookupIP(bindAddr)
if err != nil {
log.Fatalf("--bind: Failed to parse %q as IP address: %v", bindAddr, err)
}
if len(addrs) != 1 {
log.Fatalf("--bind: Expecting 1 IP address for %q but got %d", bindAddr, len(addrs))
}
fs.Config.BindAddr = addrs[0]
}
if disableFeatures != "" {
if disableFeatures == "help" {
log.Fatalf("Possible backend features are: %s\n", strings.Join(new(fs.Features).List(), ", "))
}
fs.Config.DisableFeatures = strings.Split(disableFeatures, ",")
}
// Make the config file absolute
configPath, err := filepath.Abs(config.ConfigPath)
if err == nil {
config.ConfigPath = configPath
}
}
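Downstream use is a two step dance, register then convert, sketched here on the assumption that the caller owns the pflag parsing:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configflags"
	"github.com/spf13/pflag"
)

func main() {
	// Register the global flags, parse the command line, then fold
	// anything that wasn't a straightforward binding into fs.Config.
	configflags.AddFlags(pflag.CommandLine)
	pflag.Parse()
	configflags.SetFlags()

	fmt.Printf("transfers=%d checkers=%d\n", fs.Config.Transfers, fs.Config.Checkers)
}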

View File

@@ -1,239 +1,17 @@
// This contains helper functions for managing flags
package fs
// Package flags contains enhanced versions of spf13/pflag flag
// routines which will read from the environment also.
package flags
import (
"fmt"
"log"
"math"
"os"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/ncw/rclone/fs"
"github.com/spf13/pflag"
)
// SizeSuffix is parsed by flag with k/M/G suffixes
type SizeSuffix int64
// Turn SizeSuffix into a string and a suffix
func (x SizeSuffix) string() (string, string) {
scaled := float64(0)
suffix := ""
switch {
case x < 0:
return "off", ""
case x == 0:
return "0", ""
case x < 1024:
scaled = float64(x)
suffix = ""
case x < 1024*1024:
scaled = float64(x) / 1024
suffix = "k"
case x < 1024*1024*1024:
scaled = float64(x) / 1024 / 1024
suffix = "M"
default:
scaled = float64(x) / 1024 / 1024 / 1024
suffix = "G"
}
if math.Floor(scaled) == scaled {
return fmt.Sprintf("%.0f", scaled), suffix
}
return fmt.Sprintf("%.3f", scaled), suffix
}
// String turns SizeSuffix into a string
func (x SizeSuffix) String() string {
val, suffix := x.string()
return val + suffix
}
// Unit turns SizeSuffix into a string with a unit
func (x SizeSuffix) Unit(unit string) string {
val, suffix := x.string()
if val == "off" {
return val
}
return val + " " + suffix + unit
}
// Set a SizeSuffix
func (x *SizeSuffix) Set(s string) error {
if len(s) == 0 {
return errors.New("empty string")
}
if strings.ToLower(s) == "off" {
*x = -1
return nil
}
suffix := s[len(s)-1]
suffixLen := 1
var multiplier float64
switch suffix {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
suffixLen = 0
multiplier = 1 << 10
case 'b', 'B':
multiplier = 1
case 'k', 'K':
multiplier = 1 << 10
case 'm', 'M':
multiplier = 1 << 20
case 'g', 'G':
multiplier = 1 << 30
default:
return errors.Errorf("bad suffix %q", suffix)
}
s = s[:len(s)-suffixLen]
value, err := strconv.ParseFloat(s, 64)
if err != nil {
return err
}
if value < 0 {
return errors.Errorf("size can't be negative %q", s)
}
value *= multiplier
*x = SizeSuffix(value)
return nil
}
// Type of the value
func (x *SizeSuffix) Type() string {
return "int64"
}
// Check it satisfies the interface
var _ pflag.Value = (*SizeSuffix)(nil)
// BwTimeSlot represents a bandwidth configuration at a point in time.
type BwTimeSlot struct {
hhmm int
bandwidth SizeSuffix
}
// BwTimetable contains all configured time slots.
type BwTimetable []BwTimeSlot
// String returns a printable representation of BwTimetable.
func (x BwTimetable) String() string {
ret := []string{}
for _, ts := range x {
ret = append(ret, fmt.Sprintf("%04.4d,%s", ts.hhmm, ts.bandwidth.String()))
}
return strings.Join(ret, " ")
}
// Set the bandwidth timetable.
func (x *BwTimetable) Set(s string) error {
// The timetable is formatted as:
// "hh:mm,bandwidth hh:mm,banwidth..." ex: "10:00,10G 11:30,1G 18:00,off"
// If only a single bandwidth identifier is provided, we assume constant bandwidth.
if len(s) == 0 {
return errors.New("empty string")
}
// Single value without time specification.
if !strings.Contains(s, " ") && !strings.Contains(s, ",") {
ts := BwTimeSlot{}
if err := ts.bandwidth.Set(s); err != nil {
return err
}
ts.hhmm = 0
*x = BwTimetable{ts}
return nil
}
for _, tok := range strings.Split(s, " ") {
tv := strings.Split(tok, ",")
// Format must be HH:MM,BW
if len(tv) != 2 {
return errors.Errorf("invalid time/bandwidth specification: %q", tok)
}
// Basic timespec sanity checking
hhmm := tv[0]
if len(hhmm) != 5 {
return errors.Errorf("invalid time specification (hh:mm): %q", hhmm)
}
hh, err := strconv.Atoi(hhmm[0:2])
if err != nil {
return errors.Errorf("invalid hour in time specification %q: %v", hhmm, err)
}
if hh < 0 || hh > 23 {
return errors.Errorf("invalid hour (must be between 00 and 23): %q", hh)
}
mm, err := strconv.Atoi(hhmm[3:])
if err != nil {
return errors.Errorf("invalid minute in time specification: %q: %v", hhmm, err)
}
if mm < 0 || mm > 59 {
return errors.Errorf("invalid minute (must be between 00 and 59): %q", hh)
}
ts := BwTimeSlot{
hhmm: (hh * 100) + mm,
}
// Bandwidth limit for this time slot.
if err := ts.bandwidth.Set(tv[1]); err != nil {
return err
}
*x = append(*x, ts)
}
return nil
}
// LimitAt returns a BwTimeSlot for the time requested.
func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot {
// If the timetable is empty, we return an unlimited BwTimeSlot starting at midnight.
if len(x) == 0 {
return BwTimeSlot{hhmm: 0, bandwidth: -1}
}
hhmm := tt.Hour()*100 + tt.Minute()
// By default, we return the last element in the timetable. This
// satisfies two conditions: 1) If there's only one element it
// will always be selected, and 2) The last element of the table
// will "wrap around" until overriden by an earlier time slot.
// there's only one time slot in the timetable.
ret := x[len(x)-1]
mindif := 0
first := true
// Look for most recent time slot.
for _, ts := range x {
// Ignore the past
if hhmm < ts.hhmm {
continue
}
dif := ((hhmm / 100 * 60) + (hhmm % 100)) - ((ts.hhmm / 100 * 60) + (ts.hhmm % 100))
if first {
mindif = dif
first = false
}
if dif <= mindif {
mindif = dif
ret = ts
}
}
return ret
}
// Type of the value
func (x BwTimetable) Type() string {
return "BwTimetable"
}
// Check it satisfies the interface
var _ pflag.Value = (*BwTimetable)(nil)
// optionToEnv converts an option name, eg "ignore-size" into an
// environment name "RCLONE_IGNORE_SIZE"
func optionToEnv(name string) string {
@@ -254,7 +32,7 @@ func setDefaultFromEnv(name string) {
if err != nil {
log.Fatalf("Invalid value for environment variable %q: %v", key, err)
}
Debugf(nil, "Set default for %q from %q to %q (%v)", name, key, newValue, flag.Value)
fs.Debugf(nil, "Set default for %q from %q to %q (%v)", name, key, newValue, flag.Value)
flag.DefValue = newValue
}
}
@@ -302,6 +80,15 @@ func IntP(name, shorthand string, value int, usage string) (out *int) {
return out
}
// Int64P defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.IntP
func Int64P(name, shorthand string, value int64, usage string) (out *int64) {
out = pflag.Int64P(name, shorthand, value, usage)
setDefaultFromEnv(name)
return out
}
// IntVarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.IntVarP
@@ -360,10 +147,10 @@ func VarP(value pflag.Value, name, shorthand, usage string) {
setDefaultFromEnv(name)
}
// FlagsVarP defines a flag which can be overridden by an environment variable
// FVarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.VarP
func FlagsVarP(flags *pflag.FlagSet, value pflag.Value, name, shorthand, usage string) {
func FVarP(flags *pflag.FlagSet, value pflag.Value, name, shorthand, usage string) {
flags.VarP(value, name, shorthand, usage)
setDefaultFromEnv(name)
}
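A sketch of the environment override behaviour, assuming the optionToEnv naming rule above ("transfers" becomes RCLONE_TRANSFERS) and that the variable is set before the flag is defined:

package main

import (
	"fmt"
	"os"

	"github.com/ncw/rclone/fs/config/flags"
	"github.com/spf13/pflag"
)

func main() {
	// The environment variable replaces the compiled-in default...
	_ = os.Setenv("RCLONE_TRANSFERS", "8")
	transfers := flags.IntP("transfers", "", 4, "Number of file transfers")

	// ...but an explicit --transfers on the command line still wins.
	pflag.Parse()
	fmt.Println("transfers:", *transfers) // 8 unless overridden
}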

fs/config/obscure.go Normal file (95 lines)
View File

@@ -0,0 +1,95 @@
// Obscure and Reveal config values
package config
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"io"
"log"
"github.com/pkg/errors"
)
// crypt internals
var (
cryptKey = []byte{
0x9c, 0x93, 0x5b, 0x48, 0x73, 0x0a, 0x55, 0x4d,
0x6b, 0xfd, 0x7c, 0x63, 0xc8, 0x86, 0xa9, 0x2b,
0xd3, 0x90, 0x19, 0x8e, 0xb8, 0x12, 0x8a, 0xfb,
0xf4, 0xde, 0x16, 0x2b, 0x8b, 0x95, 0xf6, 0x38,
}
cryptBlock cipher.Block
cryptRand = rand.Reader
)
// crypt transforms in to out using iv under AES-CTR.
//
// in and out may be the same buffer.
//
// Note encryption and decryption are the same operation
func crypt(out, in, iv []byte) error {
if cryptBlock == nil {
var err error
cryptBlock, err = aes.NewCipher(cryptKey)
if err != nil {
return err
}
}
stream := cipher.NewCTR(cryptBlock, iv)
stream.XORKeyStream(out, in)
return nil
}
// Obscure a value
//
// This is done by encrypting with AES-CTR
func Obscure(x string) (string, error) {
plaintext := []byte(x)
ciphertext := make([]byte, aes.BlockSize+len(plaintext))
iv := ciphertext[:aes.BlockSize]
if _, err := io.ReadFull(cryptRand, iv); err != nil {
return "", errors.Wrap(err, "failed to read iv")
}
if err := crypt(ciphertext[aes.BlockSize:], plaintext, iv); err != nil {
return "", errors.Wrap(err, "encrypt failed")
}
return base64.RawURLEncoding.EncodeToString(ciphertext), nil
}
// MustObscure obscures a value, exiting with a fatal error if it failed
func MustObscure(x string) string {
out, err := Obscure(x)
if err != nil {
log.Fatalf("Obscure failed: %v", err)
}
return out
}
// Reveal an obscured value
func Reveal(x string) (string, error) {
ciphertext, err := base64.RawURLEncoding.DecodeString(x)
if err != nil {
return "", errors.Wrap(err, "base64 decode failed when revealing password - is it obscured?")
}
if len(ciphertext) < aes.BlockSize {
return "", errors.New("input too short when revealing password - is it obscured?")
}
buf := ciphertext[aes.BlockSize:]
iv := ciphertext[:aes.BlockSize]
if err := crypt(buf, buf, iv); err != nil {
return "", errors.Wrap(err, "decrypt failed when revealing password - is it obscured?")
}
return string(buf), nil
}
// MustReveal reveals an obscured value, exiting with a fatal error if it failed
func MustReveal(x string) string {
out, err := Reveal(x)
if err != nil {
log.Fatalf("Reveal failed: %v", err)
}
return out
}
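The round trip in practice; a minimal sketch using only the exported functions above:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs/config"
)

func main() {
	obscured, err := config.Obscure("hunter2")
	if err != nil {
		panic(err)
	}
	revealed, err := config.Reveal(obscured)
	if err != nil {
		panic(err)
	}
	// The obscured form differs on every call (random IV) but always
	// reveals back to the original value.
	fmt.Println(obscured, revealed == "hunter2")
}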

fs/config/obscure_test.go Normal file (38 lines)
View File

@@ -0,0 +1,38 @@
package config
import (
"bytes"
"crypto/rand"
"testing"
"github.com/stretchr/testify/assert"
)
func TestObscure(t *testing.T) {
for _, test := range []struct {
in string
want string
iv string
}{
{"", "YWFhYWFhYWFhYWFhYWFhYQ", "aaaaaaaaaaaaaaaa"},
{"potato", "YWFhYWFhYWFhYWFhYWFhYXMaGgIlEQ", "aaaaaaaaaaaaaaaa"},
{"potato", "YmJiYmJiYmJiYmJiYmJiYp3gcEWbAw", "bbbbbbbbbbbbbbbb"},
} {
cryptRand = bytes.NewBufferString(test.iv)
got, err := Obscure(test.in)
cryptRand = rand.Reader
assert.NoError(t, err)
assert.Equal(t, test.want, got)
recoveredIn, err := Reveal(got)
assert.NoError(t, err)
assert.Equal(t, test.in, recoveredIn, "not bidirectional")
// Now the Must variants
cryptRand = bytes.NewBufferString(test.iv)
got = MustObscure(test.in)
cryptRand = rand.Reader
assert.Equal(t, test.want, got)
recoveredIn = MustReveal(got)
assert.Equal(t, test.in, recoveredIn, "not bidirectional")
}
}

View File

@@ -1,28 +0,0 @@
package fs
import "io"
// NewCountingReader returns a CountingReader, which will read from the given
// reader while keeping track of how many bytes were read.
func NewCountingReader(in io.Reader) *CountingReader {
return &CountingReader{in: in}
}
// CountingReader holds a reader and a read count of how many bytes were read
// so far.
type CountingReader struct {
in io.Reader
read uint64
}
// Read reads from the underlying reader.
func (cr *CountingReader) Read(b []byte) (int, error) {
n, err := cr.in.Read(b)
cr.read += uint64(n)
return n, err
}
// BytesRead returns how many bytes were read from the underlying reader so far.
func (cr *CountingReader) BytesRead() uint64 {
return cr.read
}

fs/deletemode.go Normal file (14 lines)
View File

@@ -0,0 +1,14 @@
package fs
// DeleteMode describes the possible delete modes in the config
type DeleteMode byte
// DeleteMode constants
const (
DeleteModeOff DeleteMode = iota
DeleteModeBefore
DeleteModeDuring
DeleteModeAfter
DeleteModeOnly
DeleteModeDefault = DeleteModeAfter
)

fs/direntries.go Normal file (81 lines)
View File

@@ -0,0 +1,81 @@
package fs
import "fmt"
// DirEntries is a slice of Object or *Dir
type DirEntries []DirEntry
// Len is part of sort.Interface.
func (ds DirEntries) Len() int {
return len(ds)
}
// Swap is part of sort.Interface.
func (ds DirEntries) Swap(i, j int) {
ds[i], ds[j] = ds[j], ds[i]
}
// Less is part of sort.Interface.
func (ds DirEntries) Less(i, j int) bool {
return ds[i].Remote() < ds[j].Remote()
}
// ForObject runs the function supplied on every object in the entries
func (ds DirEntries) ForObject(fn func(o Object)) {
for _, entry := range ds {
o, ok := entry.(Object)
if ok {
fn(o)
}
}
}
// ForObjectError runs the function supplied on every object in the entries
func (ds DirEntries) ForObjectError(fn func(o Object) error) error {
for _, entry := range ds {
o, ok := entry.(Object)
if ok {
err := fn(o)
if err != nil {
return err
}
}
}
return nil
}
// ForDir runs the function supplied on every Directory in the entries
func (ds DirEntries) ForDir(fn func(dir Directory)) {
for _, entry := range ds {
dir, ok := entry.(Directory)
if ok {
fn(dir)
}
}
}
// ForDirError runs the function supplied on every Directory in the entries
func (ds DirEntries) ForDirError(fn func(dir Directory) error) error {
for _, entry := range ds {
dir, ok := entry.(Directory)
if ok {
err := fn(dir)
if err != nil {
return err
}
}
}
return nil
}
// DirEntryType returns a string description of the DirEntry, either
// "object", "directory" or "unknown type XXX"
func DirEntryType(d DirEntry) string {
switch d.(type) {
case Object:
return "object"
case Directory:
return "directory"
}
return fmt.Sprintf("unknown type %T", d)
}
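A small sketch of the typed iteration helpers; entries would normally come from listing a remote, so this only shows the call shapes:

package example

import (
	"fmt"
	"sort"

	"github.com/ncw/rclone/fs"
)

// printEntries sorts a listing and visits objects and directories
// separately using the DirEntries helpers.
func printEntries(entries fs.DirEntries) {
	sort.Sort(entries) // DirEntries sorts by Remote()
	entries.ForObject(func(o fs.Object) {
		fmt.Printf("object %s (%d bytes)\n", o.Remote(), o.Size())
	})
	entries.ForDir(func(d fs.Directory) {
		fmt.Printf("directory %s\n", d.Remote())
	})
}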

View File

@@ -1,12 +0,0 @@
// +build !windows
package fs
// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
//
// On non windows platforms we don't have drive letters so we always
// return false
func isDriveLetter(name string) bool {
return false
}

View File

@@ -0,0 +1,14 @@
// Package driveletter returns whether a name is a valid drive letter
// +build !windows
package driveletter
// IsDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
//
// On non windows platforms we don't have drive letters so we always
// return false
func IsDriveLetter(name string) bool {
return false
}

View File

@@ -1,10 +1,10 @@
// +build windows
package fs
package driveletter
// isDriveLetter returns a bool indicating whether name is a valid
// IsDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
func isDriveLetter(name string) bool {
func IsDriveLetter(name string) bool {
if len(name) != 1 {
return false
}

fs/dump.go Normal file (89 lines)
View File

@@ -0,0 +1,89 @@
package fs
import (
"fmt"
"strings"
"github.com/pkg/errors"
)
// DumpFlags describes the Dump options in force
type DumpFlags int
// DumpFlags definitions
const (
DumpHeaders DumpFlags = 1 << iota
DumpBodies
DumpRequests
DumpResponses
DumpAuth
DumpFilters
)
var dumpFlags = []struct {
flag DumpFlags
name string
}{
{DumpHeaders, "headers"},
{DumpBodies, "bodies"},
{DumpRequests, "requests"},
{DumpResponses, "responses"},
{DumpAuth, "auth"},
{DumpFilters, "filters"},
}
// DumpFlagsList is a list of dump flags used in the help
var DumpFlagsList string
func init() {
// calculate the dump flags list
var out []string
for _, info := range dumpFlags {
out = append(out, info.name)
}
DumpFlagsList = strings.Join(out, ",")
}
// String turns a DumpFlags into a string
func (f DumpFlags) String() string {
var out []string
for _, info := range dumpFlags {
if f&info.flag != 0 {
out = append(out, info.name)
f &^= info.flag
}
}
if f != 0 {
out = append(out, fmt.Sprintf("Unknown-0x%X", int(f)))
}
return strings.Join(out, ",")
}
// Set a DumpFlags as a comma separated list of flags
func (f *DumpFlags) Set(s string) error {
var flags DumpFlags
parts := strings.Split(s, ",")
for _, part := range parts {
found := false
part = strings.ToLower(strings.TrimSpace(part))
if part == "" {
continue
}
for _, info := range dumpFlags {
if part == info.name {
found = true
flags |= info.flag
}
}
if !found {
return errors.Errorf("Unknown dump flag %q", part)
}
}
*f = flags
return nil
}
// Type of the value
func (f *DumpFlags) Type() string {
return "string"
}
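Parsing and testing the bitmask, sketched with the exported names above:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	var f fs.DumpFlags
	// Parse a comma separated list, then test individual bits.
	if err := f.Set("headers,responses"); err != nil {
		panic(err)
	}
	fmt.Println(f)                     // headers,responses
	fmt.Println(f&fs.DumpHeaders != 0) // true
	fmt.Println(f&fs.DumpBodies != 0)  // false
}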

fs/dump_test.go Normal file (58 lines)
View File

@@ -0,0 +1,58 @@
package fs
import (
"testing"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
)
// Check it satisfies the interface
var _ pflag.Value = (*DumpFlags)(nil)
func TestDumpFlagsString(t *testing.T) {
assert.Equal(t, "", DumpFlags(0).String())
assert.Equal(t, "headers", (DumpHeaders).String())
assert.Equal(t, "headers,bodies", (DumpHeaders | DumpBodies).String())
assert.Equal(t, "headers,bodies,requests,responses,auth,filters", (DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters).String())
assert.Equal(t, "headers,Unknown-0x8000", (DumpHeaders | DumpFlags(0x8000)).String())
}
func TestDumpFlagsSet(t *testing.T) {
for _, test := range []struct {
in string
want DumpFlags
wantErr string
}{
{"", DumpFlags(0), ""},
{"bodies", DumpBodies, ""},
{"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""},
{"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""},
{"headers,bodies,requests,responses,auth,filters", DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters, ""},
{"headers,bodies,unknown,auth", 0, "Unknown dump flag \"unknown\""},
} {
f := DumpFlags(-1)
initial := f
err := f.Set(test.in)
if err != nil {
if test.wantErr == "" {
t.Errorf("Got an error when not expecting one on %q: %v", test.in, err)
} else {
assert.Contains(t, err.Error(), test.wantErr)
}
assert.Equal(t, initial, f, test.want)
} else {
if test.wantErr != "" {
t.Errorf("Got no error when expecting one on %q", test.in)
} else {
assert.Equal(t, test.want, f)
}
}
}
}
func TestDumpFlagsType(t *testing.T) {
f := DumpFlags(0)
assert.Equal(t, "string", f.Type())
}

View File

@@ -1,43 +1,22 @@
// Control the filtering of files
package fs
// Package filter controls the filtering of files
package filter
import (
"bufio"
"fmt"
"log"
"os"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// Global
var (
// Flags
deleteExcluded = BoolP("delete-excluded", "", false, "Delete files on dest excluded from sync")
filterRule = StringArrayP("filter", "f", nil, "Add a file-filtering rule")
filterFrom = StringArrayP("filter-from", "", nil, "Read filtering patterns from a file")
excludeRule = StringArrayP("exclude", "", nil, "Exclude files matching pattern")
excludeFrom = StringArrayP("exclude-from", "", nil, "Read exclude patterns from file")
excludeFile = StringP("exclude-if-present", "", "", "Exclude directories if filename is present")
includeRule = StringArrayP("include", "", nil, "Include files matching pattern")
includeFrom = StringArrayP("include-from", "", nil, "Read include patterns from file")
filesFrom = StringArrayP("files-from", "", nil, "Read list of source-file names from file")
minAge = StringP("min-age", "", "", "Don't transfer any file younger than this in s or suffix ms|s|m|h|d|w|M|y")
maxAge = StringP("max-age", "", "", "Don't transfer any file older than this in s or suffix ms|s|m|h|d|w|M|y")
minSize = SizeSuffix(-1)
maxSize = SizeSuffix(-1)
//cvsExclude = BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
)
func init() {
VarP(&minSize, "min-size", "", "Don't transfer any file smaller than this in k or suffix b|k|M|G")
VarP(&maxSize, "max-size", "", "Don't transfer any file larger than this in k or suffix b|k|M|G")
}
// Active is the globally active filter
var Active = mustNewFilter(nil)
// rule is one filter rule
type rule struct {
@@ -96,167 +75,137 @@ func (rs *rules) len() int {
// FilesMap describes the map of files to transfer
type FilesMap map[string]struct{}
// Opt configures the filter
type Opt struct {
DeleteExcluded bool
FilterRule []string
FilterFrom []string
ExcludeRule []string
ExcludeFrom []string
ExcludeFile string
IncludeRule []string
IncludeFrom []string
FilesFrom []string
MinAge fs.Duration
MaxAge fs.Duration
MinSize fs.SizeSuffix
MaxSize fs.SizeSuffix
}
const unusedAge = fs.Duration((1 << 63) - 1)
// DefaultOpt is the default config for the filter
var DefaultOpt = Opt{
MinAge: unusedAge,
MaxAge: unusedAge,
MinSize: fs.SizeSuffix(-1),
MaxSize: fs.SizeSuffix(-1),
}
// Filter describes any filtering in operation
type Filter struct {
DeleteExcluded bool
MinSize int64
MaxSize int64
ModTimeFrom time.Time
ModTimeTo time.Time
fileRules rules
dirRules rules
ExcludeFile string
files FilesMap // files if filesFrom
dirs FilesMap // dirs from filesFrom
Opt Opt
ModTimeFrom time.Time
ModTimeTo time.Time
fileRules rules
dirRules rules
files FilesMap // files if filesFrom
dirs FilesMap // dirs from filesFrom
}
// We use time conventions
var ageSuffixes = []struct {
Suffix string
Multiplier time.Duration
}{
{Suffix: "ms", Multiplier: time.Millisecond},
{Suffix: "s", Multiplier: time.Second},
{Suffix: "m", Multiplier: time.Minute},
{Suffix: "h", Multiplier: time.Hour},
{Suffix: "d", Multiplier: time.Hour * 24},
{Suffix: "w", Multiplier: time.Hour * 24 * 7},
{Suffix: "M", Multiplier: time.Hour * 24 * 30},
{Suffix: "y", Multiplier: time.Hour * 24 * 365},
// NewFilter parses the command line options and creates a Filter
// object. If opt is nil, then DefaultOpt will be used
func NewFilter(opt *Opt) (f *Filter, err error) {
f = &Filter{}
// Default to second
{Suffix: "", Multiplier: time.Second},
}
// Make a copy of the options
if opt != nil {
f.Opt = *opt
} else {
f.Opt = DefaultOpt
}
// ParseDuration parses a duration string. Accepts ms|s|m|h|d|w|M|y suffixes. Defaults to seconds if not provided
func ParseDuration(age string) (time.Duration, error) {
var period float64
for _, ageSuffix := range ageSuffixes {
if strings.HasSuffix(age, ageSuffix.Suffix) {
numberString := age[:len(age)-len(ageSuffix.Suffix)]
var err error
period, err = strconv.ParseFloat(numberString, 64)
if err != nil {
return time.Duration(0), err
}
period *= float64(ageSuffix.Multiplier)
break
// Filter flags
if f.Opt.MinAge != unusedAge {
f.ModTimeTo = time.Now().Add(-time.Duration(f.Opt.MinAge))
fs.Debugf(nil, "--min-age %v to %v", f.Opt.MinAge, f.ModTimeTo)
}
if f.Opt.MaxAge != unusedAge {
f.ModTimeFrom = time.Now().Add(-time.Duration(f.Opt.MaxAge))
if !f.ModTimeTo.IsZero() && f.ModTimeFrom.Before(f.ModTimeTo) {
log.Fatal("filter: --min-age can't be larger than --max-age")
}
fs.Debugf(nil, "--max-age %v to %v", f.Opt.MaxAge, f.ModTimeFrom)
}
return time.Duration(period), nil
}
// NewFilter parses the command line options and creates a Filter object
func NewFilter() (f *Filter, err error) {
f = &Filter{
DeleteExcluded: *deleteExcluded,
MinSize: int64(minSize),
MaxSize: int64(maxSize),
}
addImplicitExclude := false
foundExcludeRule := false
if includeRule != nil {
for _, rule := range *includeRule {
err = f.Add(true, rule)
if err != nil {
return nil, err
}
addImplicitExclude = true
for _, rule := range f.Opt.IncludeRule {
err = f.Add(true, rule)
if err != nil {
return nil, err
}
addImplicitExclude = true
}
if includeFrom != nil {
for _, rule := range *includeFrom {
err := forEachLine(rule, func(line string) error {
return f.Add(true, line)
})
if err != nil {
return nil, err
}
addImplicitExclude = true
for _, rule := range f.Opt.IncludeFrom {
err := forEachLine(rule, func(line string) error {
return f.Add(true, line)
})
if err != nil {
return nil, err
}
addImplicitExclude = true
}
if excludeRule != nil {
for _, rule := range *excludeRule {
err = f.Add(false, rule)
if err != nil {
return nil, err
}
foundExcludeRule = true
for _, rule := range f.Opt.ExcludeRule {
err = f.Add(false, rule)
if err != nil {
return nil, err
}
foundExcludeRule = true
}
if excludeFrom != nil {
for _, rule := range *excludeFrom {
err := forEachLine(rule, func(line string) error {
return f.Add(false, line)
})
if err != nil {
return nil, err
}
foundExcludeRule = true
for _, rule := range f.Opt.ExcludeFrom {
err := forEachLine(rule, func(line string) error {
return f.Add(false, line)
})
if err != nil {
return nil, err
}
foundExcludeRule = true
}
if addImplicitExclude && foundExcludeRule {
Infof(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
fs.Infof(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
}
if filterRule != nil {
for _, rule := range *filterRule {
err = f.AddRule(rule)
if err != nil {
return nil, err
}
for _, rule := range f.Opt.FilterRule {
err = f.AddRule(rule)
if err != nil {
return nil, err
}
}
if filterFrom != nil {
for _, rule := range *filterFrom {
err := forEachLine(rule, f.AddRule)
if err != nil {
return nil, err
}
for _, rule := range f.Opt.FilterFrom {
err := forEachLine(rule, f.AddRule)
if err != nil {
return nil, err
}
}
if filesFrom != nil {
for _, rule := range *filesFrom {
f.initAddFile() // init to show --files-from set even if no files within
err := forEachLine(rule, func(line string) error {
return f.AddFile(line)
})
if err != nil {
return nil, err
}
for _, rule := range f.Opt.FilesFrom {
f.initAddFile() // init to show --files-from set even if no files within
err := forEachLine(rule, func(line string) error {
return f.AddFile(line)
})
if err != nil {
return nil, err
}
}
f.ExcludeFile = *excludeFile
if addImplicitExclude {
err = f.Add(false, "/**")
if err != nil {
return nil, err
}
}
if *minAge != "" {
duration, err := ParseDuration(*minAge)
if err != nil {
return nil, err
}
f.ModTimeTo = time.Now().Add(-duration)
Debugf(nil, "--min-age %v to %v", duration, f.ModTimeTo)
}
if *maxAge != "" {
duration, err := ParseDuration(*maxAge)
if err != nil {
return nil, err
}
f.ModTimeFrom = time.Now().Add(-duration)
if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) {
return nil, errors.New("argument --min-age can't be larger than --max-age")
}
Debugf(nil, "--max-age %v to %v", duration, f.ModTimeFrom)
}
if Config.Dump&DumpFilters != 0 {
if fs.Config.Dump&fs.DumpFilters != 0 {
fmt.Println("--- start filters ---")
fmt.Println(f.DumpFilters())
fmt.Println("--- end filters ---")
@@ -264,6 +213,14 @@ func NewFilter() (f *Filter, err error) {
return f, nil
}
func mustNewFilter(opt *Opt) *Filter {
f, err := NewFilter(opt)
if err != nil {
panic(err)
}
return f
}
// addDirGlobs adds directory globs from the file glob passed in
func (f *Filter) addDirGlobs(Include bool, glob string) error {
for _, dirGlob := range globToDirGlobs(glob) {
@@ -379,11 +336,11 @@ func (f *Filter) InActive() bool {
return (f.files == nil &&
f.ModTimeFrom.IsZero() &&
f.ModTimeTo.IsZero() &&
f.MinSize < 0 &&
f.MaxSize < 0 &&
f.Opt.MinSize < 0 &&
f.Opt.MaxSize < 0 &&
f.fileRules.len() == 0 &&
f.dirRules.len() == 0 &&
len(f.ExcludeFile) == 0)
len(f.Opt.ExcludeFile) == 0)
}
// includeRemote returns whether this remote passes the filter rules.
@@ -397,15 +354,15 @@ func (f *Filter) includeRemote(remote string) bool {
}
// ListContainsExcludeFile checks if exclude file is present in the list.
func (f *Filter) ListContainsExcludeFile(entries DirEntries) bool {
if len(f.ExcludeFile) == 0 {
func (f *Filter) ListContainsExcludeFile(entries fs.DirEntries) bool {
if len(f.Opt.ExcludeFile) == 0 {
return false
}
for _, entry := range entries {
obj, ok := entry.(Object)
obj, ok := entry.(fs.Object)
if ok {
basename := path.Base(obj.Remote())
if basename == f.ExcludeFile {
if basename == f.Opt.ExcludeFile {
return true
}
}
@@ -415,7 +372,7 @@ func (f *Filter) ListContainsExcludeFile(entries DirEntries) bool {
// IncludeDirectory returns a function which checks whether this
// directory should be included in the sync or not.
func (f *Filter) IncludeDirectory(fs Fs) func(string) (bool, error) {
func (f *Filter) IncludeDirectory(fs fs.Fs) func(string) (bool, error) {
return func(remote string) (bool, error) {
remote = strings.Trim(remote, "/")
// first check if we need to remove directory based on
@@ -447,9 +404,9 @@ func (f *Filter) IncludeDirectory(fs Fs) func(string) (bool, error) {
// DirContainsExcludeFile checks if exclude file is present in a
// directory. If fs is nil, it works properly if ExcludeFile is an
// empty string (for testing).
func (f *Filter) DirContainsExcludeFile(fs Fs, remote string) (bool, error) {
if len(Config.Filter.ExcludeFile) > 0 {
exists, err := FileExists(fs, path.Join(remote, Config.Filter.ExcludeFile))
func (f *Filter) DirContainsExcludeFile(fremote fs.Fs, remote string) (bool, error) {
if len(f.Opt.ExcludeFile) > 0 {
exists, err := fs.FileExists(fremote, path.Join(remote, f.Opt.ExcludeFile))
if err != nil {
return false, err
}
@@ -474,10 +431,10 @@ func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
if !f.ModTimeTo.IsZero() && modTime.After(f.ModTimeTo) {
return false
}
if f.MinSize >= 0 && size < f.MinSize {
if f.Opt.MinSize >= 0 && size < int64(f.Opt.MinSize) {
return false
}
if f.MaxSize >= 0 && size > f.MaxSize {
if f.Opt.MaxSize >= 0 && size > int64(f.Opt.MaxSize) {
return false
}
return f.includeRemote(remote)
@@ -486,7 +443,7 @@ func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
// IncludeObject returns whether this object should be included into
// the sync or not. This is a convenience function to avoid calling
// o.ModTime(), which is an expensive operation.
func (f *Filter) IncludeObject(o Object) bool {
func (f *Filter) IncludeObject(o fs.Object) bool {
var modTime time.Time
if !f.ModTimeFrom.IsZero() || !f.ModTimeTo.IsZero() {
@@ -506,7 +463,7 @@ func forEachLine(path string, fn func(string) error) (err error) {
if err != nil {
return err
}
defer CheckClose(in, &err)
defer fs.CheckClose(in, &err)
scanner := bufio.NewScanner(in)
for scanner.Scan() {
line := scanner.Text()

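For illustration (not in the diff): with the flag globals gone, a caller builds a Filter directly from an Opt. A minimal sketch; the rule strings and sizes are invented.

package main

import (
	"log"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/filter"
)

func main() {
	// Copy the package defaults, then override fields the way the
	// flag layer would.
	opt := filter.DefaultOpt
	opt.MinSize = fs.SizeSuffix(100 * 1024) // skip files smaller than 100k
	opt.FilterRule = []string{"- *.bak", "+ *"}

	f, err := filter.NewFilter(&opt)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("filter in use: %v", !f.InActive())
}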
View File

@@ -1,4 +1,4 @@
package fs
package filter
import (
"fmt"
@@ -8,47 +8,17 @@ import (
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAgeSuffix(t *testing.T) {
for _, test := range []struct {
in string
want float64
err bool
}{
{"0", 0, false},
{"", 0, true},
{"1ms", float64(time.Millisecond), false},
{"1s", float64(time.Second), false},
{"1m", float64(time.Minute), false},
{"1h", float64(time.Hour), false},
{"1d", float64(time.Hour) * 24, false},
{"1w", float64(time.Hour) * 24 * 7, false},
{"1M", float64(time.Hour) * 24 * 30, false},
{"1y", float64(time.Hour) * 24 * 365, false},
{"1.5y", float64(time.Hour) * 24 * 365 * 1.5, false},
{"-1s", -float64(time.Second), false},
{"1.s", float64(time.Second), false},
{"1x", 0, true},
} {
duration, err := ParseDuration(test.in)
if test.err {
require.Error(t, err)
} else {
require.NoError(t, err)
}
assert.Equal(t, test.want, float64(duration))
}
}
func TestNewFilterDefault(t *testing.T) {
f, err := NewFilter()
f, err := NewFilter(nil)
require.NoError(t, err)
assert.False(t, f.DeleteExcluded)
assert.Equal(t, int64(-1), f.MinSize)
assert.Equal(t, int64(-1), f.MaxSize)
assert.False(t, f.Opt.DeleteExcluded)
assert.Equal(t, fs.SizeSuffix(-1), f.Opt.MinSize)
assert.Equal(t, fs.SizeSuffix(-1), f.Opt.MaxSize)
assert.Len(t, f.fileRules.rules, 0)
assert.Len(t, f.dirRules.rules, 0)
assert.Nil(t, f.files)
@@ -70,22 +40,22 @@ func testFile(t *testing.T, contents string) string {
}
func TestNewFilterFull(t *testing.T) {
mins := int64(100 * 1024)
maxs := int64(1000 * 1024)
isFalse := false
isTrue := true
Opt := DefaultOpt
mins := fs.SizeSuffix(100 * 1024)
maxs := fs.SizeSuffix(1000 * 1024)
// Set up the input
deleteExcluded = &isTrue
filterRule = &[]string{"- filter1", "- filter1b"}
filterFrom = &[]string{testFile(t, "#comment\n+ filter2\n- filter3\n")}
excludeRule = &[]string{"exclude1"}
excludeFrom = &[]string{testFile(t, "#comment\nexclude2\nexclude3\n")}
includeRule = &[]string{"include1"}
includeFrom = &[]string{testFile(t, "#comment\ninclude2\ninclude3\n")}
filesFrom = &[]string{testFile(t, "#comment\nfiles1\nfiles2\n")}
minSize = SizeSuffix(mins)
maxSize = SizeSuffix(maxs)
Opt.DeleteExcluded = true
Opt.FilterRule = []string{"- filter1", "- filter1b"}
Opt.FilterFrom = []string{testFile(t, "#comment\n+ filter2\n- filter3\n")}
Opt.ExcludeRule = []string{"exclude1"}
Opt.ExcludeFrom = []string{testFile(t, "#comment\nexclude2\nexclude3\n")}
Opt.IncludeRule = []string{"include1"}
Opt.IncludeFrom = []string{testFile(t, "#comment\ninclude2\ninclude3\n")}
Opt.FilesFrom = []string{testFile(t, "#comment\nfiles1\nfiles2\n")}
Opt.MinSize = mins
Opt.MaxSize = maxs
rm := func(p string) {
err := os.Remove(p)
@@ -95,27 +65,17 @@ func TestNewFilterFull(t *testing.T) {
}
// Reset the input
defer func() {
rm((*filterFrom)[0])
rm((*excludeFrom)[0])
rm((*includeFrom)[0])
rm((*filesFrom)[0])
minSize = -1
maxSize = -1
deleteExcluded = &isFalse
filterRule = nil
filterFrom = nil
excludeRule = nil
excludeFrom = nil
includeRule = nil
includeFrom = nil
filesFrom = nil
rm(Opt.FilterFrom[0])
rm(Opt.ExcludeFrom[0])
rm(Opt.IncludeFrom[0])
rm(Opt.FilesFrom[0])
}()
f, err := NewFilter()
f, err := NewFilter(&Opt)
require.NoError(t, err)
assert.True(t, f.DeleteExcluded)
assert.Equal(t, f.MinSize, mins)
assert.Equal(t, f.MaxSize, maxs)
assert.True(t, f.Opt.DeleteExcluded)
assert.Equal(t, f.Opt.MinSize, mins)
assert.Equal(t, f.Opt.MaxSize, maxs)
got := f.DumpFilters()
want := `--- File filter rules ---
+ (^|/)include1$
@@ -153,7 +113,7 @@ type includeTest struct {
func testInclude(t *testing.T, f *Filter, tests []includeTest) {
for _, test := range tests {
got := f.Include(test.in, test.size, time.Unix(test.modTime, 0))
assert.Equal(t, test.want, got, test.in, test.size, test.modTime)
assert.Equal(t, test.want, got, fmt.Sprintf("in=%q, size=%v, modTime=%v", test.in, test.size, time.Unix(test.modTime, 0)))
}
}
@@ -171,7 +131,7 @@ func testDirInclude(t *testing.T, f *Filter, tests []includeDirTest) {
}
func TestNewFilterIncludeFiles(t *testing.T) {
f, err := NewFilter()
f, err := NewFilter(nil)
require.NoError(t, err)
err = f.AddFile("file1.jpg")
require.NoError(t, err)
@@ -192,7 +152,7 @@ func TestNewFilterIncludeFiles(t *testing.T) {
}
func TestNewFilterIncludeFilesDirs(t *testing.T) {
f, err := NewFilter()
f, err := NewFilter(nil)
require.NoError(t, err)
for _, path := range []string{
"path/to/dir/file1.png",
@@ -224,9 +184,9 @@ func TestNewFilterIncludeFilesDirs(t *testing.T) {
}
func TestNewFilterMinSize(t *testing.T) {
f, err := NewFilter()
f, err := NewFilter(nil)
require.NoError(t, err)
f.MinSize = 100
f.Opt.MinSize = 100
testInclude(t, f, []includeTest{
{"file1.jpg", 100, 0, true},
{"file2.jpg", 101, 0, true},
@@ -236,9 +196,9 @@ func TestNewFilterMinSize(t *testing.T) {
}
func TestNewFilterMaxSize(t *testing.T) {
f, err := NewFilter()
f, err := NewFilter(nil)
require.NoError(t, err)
f.MaxSize = 100
f.Opt.MaxSize = 100
testInclude(t, f, []includeTest{
{"file1.jpg", 100, 0, true},
{"file2.jpg", 101, 0, false},
@@ -248,7 +208,7 @@ func TestNewFilterMaxSize(t *testing.T) {
}
func TestNewFilterMinAndMaxAge(t *testing.T) {
f, err := NewFilter()
f, err := NewFilter(nil)
require.NoError(t, err)
f.ModTimeFrom = time.Unix(1440000002, 0)
f.ModTimeTo = time.Unix(1440000003, 0)
@@ -263,7 +223,7 @@ func TestNewFilterMinAndMaxAge(t *testing.T) {
}
func TestNewFilterMinAge(t *testing.T) {
f, err := NewFilter()
f, err := NewFilter(nil)
require.NoError(t, err)
f.ModTimeTo = time.Unix(1440000002, 0)
testInclude(t, f, []includeTest{
@@ -277,7 +237,7 @@ func TestNewFilterMinAge(t *testing.T) {
}
func TestNewFilterMaxAge(t *testing.T) {
f, err := NewFilter()
f, err := NewFilter(nil)
require.NoError(t, err)
f.ModTimeFrom = time.Unix(1440000002, 0)
testInclude(t, f, []includeTest{
@@ -291,7 +251,7 @@ func TestNewFilterMaxAge(t *testing.T) {
}
func TestNewFilterMatches(t *testing.T) {
f, err := NewFilter()
f, err := NewFilter(nil)
require.NoError(t, err)
add := func(s string) {
err := f.AddRule(s)
@@ -396,7 +356,7 @@ func TestFilterAddDirRuleOrFileRule(t *testing.T) {
+ (^|/)a/$`,
},
} {
f, err := NewFilter()
f, err := NewFilter(nil)
require.NoError(t, err)
err = f.Add(test.included, test.glob)
require.NoError(t, err)
@@ -464,7 +424,7 @@ func TestFilterMatchesFromDocs(t *testing.T) {
{"\\\\.jpg", true, "\\.jpg"},
{"\\[one\\].jpg", true, "[one].jpg"},
} {
f, err := NewFilter()
f, err := NewFilter(nil)
require.NoError(t, err)
err = f.Add(true, test.glob)
require.NoError(t, err)

View File

@@ -0,0 +1,31 @@
// Package filterflags implements command line flags to set up a filter
package filterflags
import (
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/filter"
"github.com/spf13/pflag"
)
// Options set by command line flags
var (
Opt = filter.DefaultOpt
)
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &Opt.DeleteExcluded, "delete-excluded", "", false, "Delete files on dest excluded from sync")
flags.StringArrayVarP(flagSet, &Opt.FilterRule, "filter", "f", nil, "Add a file-filtering rule")
flags.StringArrayVarP(flagSet, &Opt.FilterFrom, "filter-from", "", nil, "Read filtering patterns from a file")
flags.StringArrayVarP(flagSet, &Opt.ExcludeRule, "exclude", "", nil, "Exclude files matching pattern")
flags.StringArrayVarP(flagSet, &Opt.ExcludeFrom, "exclude-from", "", nil, "Read exclude patterns from file")
flags.StringVarP(flagSet, &Opt.ExcludeFile, "exclude-if-present", "", "", "Exclude directories if filename is present")
flags.StringArrayVarP(flagSet, &Opt.IncludeRule, "include", "", nil, "Include files matching pattern")
flags.StringArrayVarP(flagSet, &Opt.IncludeFrom, "include-from", "", nil, "Read include patterns from file")
flags.StringArrayVarP(flagSet, &Opt.FilesFrom, "files-from", "", nil, "Read list of source-file names from file")
flags.FVarP(flagSet, &Opt.MinAge, "min-age", "", "Don't transfer any file younger than this in s or suffix ms|s|m|h|d|w|M|y")
flags.FVarP(flagSet, &Opt.MaxAge, "max-age", "", "Don't transfer any file older than this in s or suffix ms|s|m|h|d|w|M|y")
flags.FVarP(flagSet, &Opt.MinSize, "min-size", "", "Don't transfer any file smaller than this in k or suffix b|k|M|G")
flags.FVarP(flagSet, &Opt.MaxSize, "max-size", "", "Don't transfer any file larger than this in k or suffix b|k|M|G")
//cvsExclude = BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
}
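A sketch of the intended division of labour (illustrative, not from the diff): a command registers the flags, parses them, then hands filterflags.Opt to the filter package.

package main

import (
	"log"

	"github.com/ncw/rclone/fs/filter"
	"github.com/ncw/rclone/fs/filterflags"
	"github.com/spf13/pflag"
)

func main() {
	// Registers --filter, --exclude, --min-size etc on the default flag set.
	filterflags.AddFlags(pflag.CommandLine)
	pflag.Parse()

	// Build the filter from whatever the flags populated.
	f, err := filter.NewFilter(&filterflags.Opt)
	if err != nil {
		log.Fatal(err)
	}
	_ = f // hand f to the listing/sync code
}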

View File

@@ -1,6 +1,6 @@
// rsync style glob parser
package fs
package filter
import (
"bytes"

View File

@@ -1,4 +1,4 @@
package fs
package filter
import (
"testing"

View File

@@ -1,187 +0,0 @@
package fs
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSizeSuffixString(t *testing.T) {
for _, test := range []struct {
in float64
want string
}{
{0, "0"},
{102, "102"},
{1024, "1k"},
{1024 * 1024, "1M"},
{1024 * 1024 * 1024, "1G"},
{10 * 1024 * 1024 * 1024, "10G"},
{10.1 * 1024 * 1024 * 1024, "10.100G"},
{-1, "off"},
{-100, "off"},
} {
ss := SizeSuffix(test.in)
got := ss.String()
assert.Equal(t, test.want, got)
}
}
func TestSizeSuffixUnit(t *testing.T) {
for _, test := range []struct {
in float64
want string
}{
{0, "0 Bytes"},
{102, "102 Bytes"},
{1024, "1 kBytes"},
{1024 * 1024, "1 MBytes"},
{1024 * 1024 * 1024, "1 GBytes"},
{10 * 1024 * 1024 * 1024, "10 GBytes"},
{10.1 * 1024 * 1024 * 1024, "10.100 GBytes"},
{-1, "off"},
{-100, "off"},
} {
ss := SizeSuffix(test.in)
got := ss.Unit("Bytes")
assert.Equal(t, test.want, got)
}
}
func TestSizeSuffixSet(t *testing.T) {
for _, test := range []struct {
in string
want int64
err bool
}{
{"0", 0, false},
{"1b", 1, false},
{"102B", 102, false},
{"0.1k", 102, false},
{"0.1", 102, false},
{"1K", 1024, false},
{"1", 1024, false},
{"2.5", 1024 * 2.5, false},
{"1M", 1024 * 1024, false},
{"1.g", 1024 * 1024 * 1024, false},
{"10G", 10 * 1024 * 1024 * 1024, false},
{"off", -1, false},
{"OFF", -1, false},
{"", 0, true},
{"1p", 0, true},
{"1.p", 0, true},
{"1p", 0, true},
{"-1K", 0, true},
} {
ss := SizeSuffix(0)
err := ss.Set(test.in)
if test.err {
require.Error(t, err)
} else {
require.NoError(t, err)
}
assert.Equal(t, test.want, int64(ss))
}
}
func TestBwTimetableSet(t *testing.T) {
for _, test := range []struct {
in string
want BwTimetable
err bool
}{
{"", BwTimetable{}, true},
{"0", BwTimetable{BwTimeSlot{hhmm: 0, bandwidth: 0}}, false},
{"666", BwTimetable{BwTimeSlot{hhmm: 0, bandwidth: 666 * 1024}}, false},
{"10:20,666", BwTimetable{BwTimeSlot{hhmm: 1020, bandwidth: 666 * 1024}}, false},
{
"11:00,333 13:40,666 23:50,10M 23:59,off",
BwTimetable{
BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
BwTimeSlot{hhmm: 1340, bandwidth: 666 * 1024},
BwTimeSlot{hhmm: 2350, bandwidth: 10 * 1024 * 1024},
BwTimeSlot{hhmm: 2359, bandwidth: -1},
},
false,
},
{"bad,bad", BwTimetable{}, true},
{"bad bad", BwTimetable{}, true},
{"bad", BwTimetable{}, true},
{"1000X", BwTimetable{}, true},
{"2401,666", BwTimetable{}, true},
{"1061,666", BwTimetable{}, true},
} {
tt := BwTimetable{}
err := tt.Set(test.in)
if test.err {
require.Error(t, err)
} else {
require.NoError(t, err)
}
assert.Equal(t, test.want, tt)
}
}
func TestBwTimetableLimitAt(t *testing.T) {
for _, test := range []struct {
tt BwTimetable
now time.Time
want BwTimeSlot
}{
{
BwTimetable{},
time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
BwTimeSlot{hhmm: 0, bandwidth: -1},
},
{
BwTimetable{BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024}},
time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
},
{
BwTimetable{
BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
BwTimeSlot{hhmm: 2301, bandwidth: 1024 * 1024},
BwTimeSlot{hhmm: 2350, bandwidth: -1},
},
time.Date(2017, time.April, 20, 10, 15, 0, 0, time.UTC),
BwTimeSlot{hhmm: 2350, bandwidth: -1},
},
{
BwTimetable{
BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
BwTimeSlot{hhmm: 2301, bandwidth: 1024 * 1024},
BwTimeSlot{hhmm: 2350, bandwidth: -1},
},
time.Date(2017, time.April, 20, 11, 0, 0, 0, time.UTC),
BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
},
{
BwTimetable{
BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
BwTimeSlot{hhmm: 2301, bandwidth: 1024 * 1024},
BwTimeSlot{hhmm: 2350, bandwidth: -1},
},
time.Date(2017, time.April, 20, 13, 1, 0, 0, time.UTC),
BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
},
{
BwTimetable{
BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
BwTimeSlot{hhmm: 2301, bandwidth: 1024 * 1024},
BwTimeSlot{hhmm: 2350, bandwidth: -1},
},
time.Date(2017, time.April, 20, 23, 59, 0, 0, time.UTC),
BwTimeSlot{hhmm: 2350, bandwidth: -1},
},
} {
slot := test.tt.LimitAt(test.now)
assert.Equal(t, test.want, slot)
}
}

View File

@@ -14,6 +14,8 @@ import (
"strings"
"time"
"github.com/ncw/rclone/fs/driveletter"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)
@@ -29,7 +31,7 @@ const (
// Globals
var (
// Filesystem registry
fsRegistry []*RegInfo
Registry []*RegInfo
// ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
ErrorNotFoundInConfigFile = errors.New("didn't find section in config file")
ErrorCantPurge = errors.New("can't purge directory")
@@ -103,7 +105,7 @@ type OptionExample struct {
//
// Fs modules should use this in an init() function
func Register(info *RegInfo) {
fsRegistry = append(fsRegistry, info)
Registry = append(Registry, info)
}
// Fs is the interface a cloud storage system must provide
@@ -158,7 +160,7 @@ type Info interface {
Precision() time.Duration
// Returns the supported hash types of the filesystem
Hashes() HashSet
Hashes() hash.Set
// Features returns the optional features of this Fs
Features() *Features
@@ -190,7 +192,7 @@ type ObjectInfo interface {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
Hash(HashType) (string, error)
Hash(hash.Type) (string, error)
// Storable says whether this object can be stored
Storable() bool
@@ -671,7 +673,7 @@ type Objects []Object
// ObjectPair is a pair of Objects used to describe a potential copy
// operation.
type ObjectPair struct {
src, dst Object
Src, Dst Object
}
// ObjectPairChan is a channel of ObjectPair
@@ -681,7 +683,7 @@ type ObjectPairChan chan ObjectPair
//
// Services are looked up in the config file
func Find(name string) (*RegInfo, error) {
for _, item := range fsRegistry {
for _, item := range Registry {
if item.Name == name {
return item, nil
}
@@ -702,16 +704,16 @@ func MustFind(name string) *RegInfo {
return fs
}
// Pattern to match an rclone url
var matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`)
// Matcher is a pattern to match an rclone URL
var Matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`)
// ParseRemote deconstructs a path into configName, fsPath, looking up
// the fsName in the config file (returning NotFoundInConfigFile if not found)
func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err error) {
parts := matcher.FindStringSubmatch(path)
parts := Matcher.FindStringSubmatch(path)
var fsName string
fsName, configName, fsPath = "local", "local", path
if parts != nil && !isDriveLetter(parts[1]) {
if parts != nil && !driveletter.IsDriveLetter(parts[1]) {
configName, fsPath = parts[1], parts[2]
fsName = ConfigFileGet(configName, "type")
if fsName == "" {
@@ -741,10 +743,10 @@ func NewFs(path string) (Fs, error) {
return fsInfo.NewFs(configName, fsPath)
}
// temporaryLocalFs creates a local FS in the OS's temporary directory.
// TemporaryLocalFs creates a local FS in the OS's temporary directory.
//
// No cleanup is performed, the caller must call Purge on the Fs themselves.
func temporaryLocalFs() (Fs, error) {
func TemporaryLocalFs() (Fs, error) {
path, err := ioutil.TempDir("", "rclone-spool")
if err == nil {
err = os.Remove(path)
@@ -777,3 +779,24 @@ func FileExists(fs Fs, remote string) (bool, error) {
}
return true, nil
}
// CalculateModifyWindow works out modify window for Fses passed in -
// sets Config.ModifyWindow
//
// This is the largest modify window of all the fses in use, and the
// user configured value
func CalculateModifyWindow(fss ...Fs) {
for _, f := range fss {
if f != nil {
precision := f.Precision()
if precision > Config.ModifyWindow {
Config.ModifyWindow = precision
}
if precision == ModTimeNotSupported {
Infof(f, "Modify window not supported")
return
}
}
}
Infof(fss[0], "Modify window is %s", Config.ModifyWindow)
}
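With Registry and Matcher now exported, backends outside the fs package can register themselves. An illustrative sketch; the backend name "mybackend" and its constructor are placeholders, not code from this commit.

package mybackend

import (
	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:  "mybackend", // looked up by fs.Find / fs.MustFind
		NewFs: NewFs,       // called by fs.NewFs after config lookup
	})
}

// NewFs builds the backend from a config section name and a root path.
func NewFs(name, root string) (fs.Fs, error) {
	return nil, errors.New("mybackend: not implemented")
}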

View File

@@ -1,6 +1,6 @@
// +build plan9
package fs
package fserrors
// isClosedConnErrorPlatform reports whether err is an error from use
// of a closed network connection using platform specific error codes.

View File

@@ -1,6 +1,5 @@
// Errors and error handling
package fs
// Package fserrors provides errors and error handling
package fserrors
import (
"fmt"

View File

@@ -1,4 +1,4 @@
package fs
package fserrors
import (
"fmt"

View File

@@ -1,6 +1,6 @@
// +build !plan9
package fs
package fserrors
import (
"syscall"

View File

@@ -1,6 +1,6 @@
// +build windows
package fs
package fserrors
import (
"syscall"

View File

@@ -1,6 +1,5 @@
// The HTTP based parts of the config, Transport and Client
package fs
// Package fshttp contains the common http parts of the config, Transport and Client
package fshttp
import (
"bytes"
@@ -12,6 +11,7 @@ import (
"sync"
"time"
"github.com/ncw/rclone/fs"
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.6
"golang.org/x/time/rate"
)
@@ -27,15 +27,15 @@ var (
tpsBucket *rate.Limiter // for limiting number of http transactions per second
)
// Start the token bucket if necessary
func startHTTPTokenBucket() {
if Config.TPSLimit > 0 {
tpsBurst := Config.TPSLimitBurst
// StartHTTPTokenBucket starts the token bucket if necessary
func StartHTTPTokenBucket() {
if fs.Config.TPSLimit > 0 {
tpsBurst := fs.Config.TPSLimitBurst
if tpsBurst < 1 {
tpsBurst = 1
}
tpsBucket = rate.NewLimiter(rate.Limit(Config.TPSLimit), tpsBurst)
Infof(nil, "Starting HTTP transaction limiter: max %g transactions/s with burst %d", Config.TPSLimit, tpsBurst)
tpsBucket = rate.NewLimiter(rate.Limit(fs.Config.TPSLimit), tpsBurst)
fs.Infof(nil, "Starting HTTP transaction limiter: max %g transactions/s with burst %d", fs.Config.TPSLimit, tpsBurst)
}
}
@@ -108,8 +108,8 @@ func setDefaults(a, b interface{}) {
}
}
// Transport returns an http.RoundTripper with the correct timeouts
func (ci *ConfigInfo) Transport() http.RoundTripper {
// NewTransport returns an http.RoundTripper with the correct timeouts
func NewTransport(ci *fs.ConfigInfo) http.RoundTripper {
noTransport.Do(func() {
// Start with a sensible set of defaults then override.
// This also means we get new stuff when it gets added to go
@@ -120,24 +120,24 @@ func (ci *ConfigInfo) Transport() http.RoundTripper {
t.TLSHandshakeTimeout = ci.ConnectTimeout
t.ResponseHeaderTimeout = ci.Timeout
t.TLSClientConfig = &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify}
t.DisableCompression = *noGzip
t.DisableCompression = ci.NoGzip
// Set in http_old.go initTransport
// t.Dial
// Set in http_new.go initTransport
// t.DialContext
// t.IdleConnTimeout
// t.ExpectContinueTimeout
ci.initTransport(t)
initTransport(ci, t)
// Wrap that http.Transport in our own transport
transport = NewTransport(t, ci.Dump)
transport = newTransport(ci, t)
})
return transport
}
// Client returns an http.Client with the correct timeouts
func (ci *ConfigInfo) Client() *http.Client {
// NewClient returns an http.Client with the correct timeouts
func NewClient(ci *fs.ConfigInfo) *http.Client {
return &http.Client{
Transport: ci.Transport(),
Transport: NewTransport(ci),
}
}
@@ -146,16 +146,18 @@ func (ci *ConfigInfo) Client() *http.Client {
// * Does logging
type Transport struct {
*http.Transport
dump DumpFlags
dump fs.DumpFlags
filterRequest func(req *http.Request)
userAgent string
}
// NewTransport wraps the http.Transport passed in and logs all
// newTransport wraps the http.Transport passed in and logs all
// roundtrips including the body if logBody is set.
func NewTransport(transport *http.Transport, dump DumpFlags) *Transport {
func newTransport(ci *fs.ConfigInfo, transport *http.Transport) *Transport {
return &Transport{
Transport: transport,
dump: dump,
dump: ci.Dump,
userAgent: ci.UserAgent,
}
}
@@ -188,13 +190,13 @@ func checkServerTime(req *http.Request, resp *http.Response) {
}
date, err := http.ParseTime(dateString)
if err != nil {
Debugf(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err)
fs.Debugf(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err)
return
}
dt := time.Since(date)
const window = 5 * 60 * time.Second
if dt > window || dt < -window {
Logf(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt)
fs.Logf(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt)
}
checkedHostMu.Lock()
checkedHost[host] = struct{}{}
@@ -250,39 +252,39 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
if tpsBucket != nil {
tbErr := tpsBucket.Wait(context.Background()) // FIXME switch to req.Context() when we drop go1.6 support
if tbErr != nil {
Errorf(nil, "HTTP token bucket error: %v", err)
fs.Errorf(nil, "HTTP token bucket error: %v", err)
}
}
// Force user agent
req.Header.Set("User-Agent", *userAgent)
req.Header.Set("User-Agent", t.userAgent)
// Filter the request if required
if t.filterRequest != nil {
t.filterRequest(req)
}
// Logf request
if t.dump&(DumpHeaders|DumpBodies|DumpAuth|DumpRequests|DumpResponses) != 0 {
buf, _ := httputil.DumpRequestOut(req, t.dump&(DumpBodies|DumpRequests) != 0)
if t.dump&DumpAuth == 0 {
if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
buf, _ := httputil.DumpRequestOut(req, t.dump&(fs.DumpBodies|fs.DumpRequests) != 0)
if t.dump&fs.DumpAuth == 0 {
buf = cleanAuths(buf)
}
Debugf(nil, "%s", separatorReq)
Debugf(nil, "%s (req %p)", "HTTP REQUEST", req)
Debugf(nil, "%s", string(buf))
Debugf(nil, "%s", separatorReq)
fs.Debugf(nil, "%s", separatorReq)
fs.Debugf(nil, "%s (req %p)", "HTTP REQUEST", req)
fs.Debugf(nil, "%s", string(buf))
fs.Debugf(nil, "%s", separatorReq)
}
// Do round trip
resp, err = t.Transport.RoundTrip(req)
// Logf response
if t.dump&(DumpHeaders|DumpBodies|DumpAuth|DumpRequests|DumpResponses) != 0 {
Debugf(nil, "%s", separatorResp)
Debugf(nil, "%s (req %p)", "HTTP RESPONSE", req)
if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
fs.Debugf(nil, "%s", separatorResp)
fs.Debugf(nil, "%s (req %p)", "HTTP RESPONSE", req)
if err != nil {
Debugf(nil, "Error: %v", err)
fs.Debugf(nil, "Error: %v", err)
} else {
buf, _ := httputil.DumpResponse(resp, t.dump&(DumpBodies|DumpResponses) != 0)
Debugf(nil, "%s", string(buf))
buf, _ := httputil.DumpResponse(resp, t.dump&(fs.DumpBodies|fs.DumpResponses) != 0)
fs.Debugf(nil, "%s", string(buf))
}
Debugf(nil, "%s", separatorResp)
fs.Debugf(nil, "%s", separatorResp)
}
if err == nil {
checkServerTime(req, resp)
@@ -292,7 +294,7 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
// NewDialer creates a net.Dialer structure with Timeout, Keepalive
// and LocalAddr set from rclone flags.
func (ci *ConfigInfo) NewDialer() *net.Dialer {
func NewDialer(ci *fs.ConfigInfo) *net.Dialer {
dialer := &net.Dialer{
Timeout: ci.ConnectTimeout,
KeepAlive: 30 * time.Second,

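Illustrative usage of the relocated helpers (not from the diff); it assumes fs.Config is the global *ConfigInfo, as the calls above imply, and uses a placeholder URL.

package main

import (
	"log"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/fshttp"
)

func main() {
	// NewClient applies rclone's timeouts, dump flags and user agent.
	client := fshttp.NewClient(fs.Config)
	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer fs.CheckClose(resp.Body, &err)
	log.Println(resp.Status)
}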
View File

@@ -2,18 +2,20 @@
//+build go1.7
package fs
package fshttp
import (
"context"
"net"
"net/http"
"time"
"github.com/ncw/rclone/fs"
)
// dial with context and timeouts
func (ci *ConfigInfo) dialContextTimeout(ctx context.Context, network, address string) (net.Conn, error) {
dialer := ci.NewDialer()
func dialContextTimeout(ctx context.Context, network, address string, ci *fs.ConfigInfo) (net.Conn, error) {
dialer := NewDialer(ci)
c, err := dialer.DialContext(ctx, network, address)
if err != nil {
return c, err
@@ -22,8 +24,10 @@ func (ci *ConfigInfo) dialContextTimeout(ctx context.Context, network, address s
}
// Initialise the http.Transport for go1.7+
func (ci *ConfigInfo) initTransport(t *http.Transport) {
t.DialContext = ci.dialContextTimeout
func initTransport(ci *fs.ConfigInfo, t *http.Transport) {
t.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
return dialContextTimeout(ctx, network, addr, ci)
}
t.IdleConnTimeout = 60 * time.Second
t.ExpectContinueTimeout = ci.ConnectTimeout
}

fs/fshttp/http_old.go Normal file
View File

@@ -0,0 +1,29 @@
// HTTP parts pre go1.7
//+build !go1.7
package fshttp
import (
"net"
"net/http"
"github.com/ncw/rclone/fs"
)
// dial with timeouts
func dialTimeout(network, address string, ci *fs.ConfigInfo) (net.Conn, error) {
dialer := NewDialer(ci)
c, err := dialer.Dial(network, address)
if err != nil {
return c, err
}
return newTimeoutConn(c, ci.Timeout), nil
}
// Initialise the http.Transport for pre go1.7
func initTransport(ci *fs.ConfigInfo, t *http.Transport) {
t.Dial = func(network, addr string) (net.Conn, error) {
return dialTimeout(network, addr, ci)
}
}

View File

@@ -1,6 +1,6 @@
//+build go1.7
package fs
package fshttp
import (
"fmt"

View File

@@ -1,4 +1,5 @@
package fs
// Package fspath contains routines for fspath manipulation
package fspath
import (
"path"

View File

@@ -1,4 +1,4 @@
package fs
package fspath
import (
"fmt"

View File

@@ -1,4 +1,4 @@
package fs
package hash
import (
"crypto/md5"
@@ -14,8 +14,8 @@ import (
"github.com/spf13/pflag"
)
// HashType indicates a standard hashing algorithm
type HashType int
// Type indicates a standard hashing algorithm
type Type int
// ErrHashUnsupported should be returned by filesystem,
// if it is requested to deliver an unsupported hash type.
@@ -23,7 +23,7 @@ var ErrHashUnsupported = errors.New("hash type not supported")
const (
// HashMD5 indicates MD5 support
HashMD5 HashType = 1 << iota
HashMD5 Type = 1 << iota
// HashSHA1 indicates SHA-1 support
HashSHA1
@@ -33,7 +33,7 @@ const (
HashDropbox
// HashNone indicates no hashes are supported
HashNone HashType = 0
HashNone Type = 0
)
// SupportedHashes returns a set of all the supported hashes by
@@ -41,19 +41,19 @@ const (
var SupportedHashes = NewHashSet(HashMD5, HashSHA1, HashDropbox)
// HashWidth returns the width in characters for any HashType
var HashWidth = map[HashType]int{
var HashWidth = map[Type]int{
HashMD5: 32,
HashSHA1: 40,
HashDropbox: 64,
}
// HashStream will calculate hashes of all supported hash types.
func HashStream(r io.Reader) (map[HashType]string, error) {
return HashStreamTypes(r, SupportedHashes)
// Stream will calculate hashes of all supported hash types.
func Stream(r io.Reader) (map[Type]string, error) {
return StreamTypes(r, SupportedHashes)
}
// HashStreamTypes will calculate hashes of the requested hash types.
func HashStreamTypes(r io.Reader, set HashSet) (map[HashType]string, error) {
// StreamTypes will calculate hashes of the requested hash types.
func StreamTypes(r io.Reader, set Set) (map[Type]string, error) {
hashers, err := hashFromTypes(set)
if err != nil {
return nil, err
@@ -63,7 +63,7 @@ func HashStreamTypes(r io.Reader, set HashSet) (map[HashType]string, error) {
if err != nil {
return nil, err
}
var ret = make(map[HashType]string)
var ret = make(map[Type]string)
for k, v := range hashers {
ret[k] = hex.EncodeToString(v.Sum(nil))
}
@@ -72,7 +72,7 @@ func HashStreamTypes(r io.Reader, set HashSet) (map[HashType]string, error) {
// String returns a string representation of the hash type.
// The function will panic if the hash type is unknown.
func (h HashType) String() string {
func (h Type) String() string {
switch h {
case HashNone:
return "None"
@@ -89,7 +89,7 @@ func (h HashType) String() string {
}
// Set a HashType from a flag
func (h *HashType) Set(s string) error {
func (h *Type) Set(s string) error {
switch s {
case "None":
*h = HashNone
@@ -106,21 +106,21 @@ func (h *HashType) Set(s string) error {
}
// Type of the value
func (h HashType) Type() string {
func (h Type) Type() string {
return "string"
}
// Check it satisfies the interface
var _ pflag.Value = (*HashType)(nil)
var _ pflag.Value = (*Type)(nil)
// hashFromTypes will return hashers for all the requested types.
// The types must be a subset of SupportedHashes,
// and this function must support all types.
func hashFromTypes(set HashSet) (map[HashType]hash.Hash, error) {
func hashFromTypes(set Set) (map[Type]hash.Hash, error) {
if !set.SubsetOf(SupportedHashes) {
return nil, errors.Errorf("requested set %08x contains unknown hash types", int(set))
}
var hashers = make(map[HashType]hash.Hash)
var hashers = make(map[Type]hash.Hash)
types := set.Array()
for _, t := range types {
switch t {
@@ -141,7 +141,7 @@ func hashFromTypes(set HashSet) (map[HashType]hash.Hash, error) {
// hashToMultiWriter will return a set of hashers into a
// single multiwriter, where one write will update all
// the hashers.
func hashToMultiWriter(h map[HashType]hash.Hash) io.Writer {
func hashToMultiWriter(h map[Type]hash.Hash) io.Writer {
// Convert to a slice
var w = make([]io.Writer, 0, len(h))
for _, v := range h {
@@ -155,7 +155,7 @@ func hashToMultiWriter(h map[HashType]hash.Hash) io.Writer {
type MultiHasher struct {
w io.Writer
size int64
h map[HashType]hash.Hash // Hashes
h map[Type]hash.Hash // Hashes
}
// NewMultiHasher will return a hash writer that will write all
@@ -170,7 +170,7 @@ func NewMultiHasher() *MultiHasher {
// NewMultiHasherTypes will return a hash writer that will write
// the requested hash types.
func NewMultiHasherTypes(set HashSet) (*MultiHasher, error) {
func NewMultiHasherTypes(set Set) (*MultiHasher, error) {
hashers, err := hashFromTypes(set)
if err != nil {
return nil, err
@@ -187,8 +187,8 @@ func (m *MultiHasher) Write(p []byte) (n int, err error) {
// Sums returns the sums of all accumulated hashes as hex encoded
// strings.
func (m *MultiHasher) Sums() map[HashType]string {
dst := make(map[HashType]string)
func (m *MultiHasher) Sums() map[Type]string {
dst := make(map[Type]string)
for k, v := range m.h {
dst[k] = hex.EncodeToString(v.Sum(nil))
}
@@ -200,63 +200,63 @@ func (m *MultiHasher) Size() int64 {
return m.size
}
// A HashSet indicates one or more hash types.
type HashSet int
// A Set indicates one or more hash types.
type Set int
// NewHashSet will create a new hash set with the hash types supplied
func NewHashSet(t ...HashType) HashSet {
h := HashSet(HashNone)
func NewHashSet(t ...Type) Set {
h := Set(HashNone)
return h.Add(t...)
}
// Add one or more hash types to the set.
// Returns the modified hash set.
func (h *HashSet) Add(t ...HashType) HashSet {
func (h *Set) Add(t ...Type) Set {
for _, v := range t {
*h |= HashSet(v)
*h |= Set(v)
}
return *h
}
// Contains returns true if the set contains the given hash type
func (h HashSet) Contains(t HashType) bool {
func (h Set) Contains(t Type) bool {
return int(h)&int(t) != 0
}
// Overlap returns the overlapping hash types
func (h HashSet) Overlap(t HashSet) HashSet {
return HashSet(int(h) & int(t))
func (h Set) Overlap(t Set) Set {
return Set(int(h) & int(t))
}
// SubsetOf will return true if all types of h
// are present in the set c
func (h HashSet) SubsetOf(c HashSet) bool {
func (h Set) SubsetOf(c Set) bool {
return int(h)|int(c) == int(c)
}
// GetOne will return a hash type.
// Currently the first is returned, but it could be
// improved to return the strongest.
func (h HashSet) GetOne() HashType {
func (h Set) GetOne() Type {
v := int(h)
i := uint(0)
for v != 0 {
if v&1 != 0 {
return HashType(1 << i)
return Type(1 << i)
}
i++
v >>= 1
}
return HashType(HashNone)
return Type(HashNone)
}
// Array returns an array of all hash types in the set
func (h HashSet) Array() (ht []HashType) {
func (h Set) Array() (ht []Type) {
v := int(h)
i := uint(0)
for v != 0 {
if v&1 != 0 {
ht = append(ht, HashType(1<<i))
ht = append(ht, Type(1<<i))
}
i++
v >>= 1
@@ -265,7 +265,7 @@ func (h HashSet) Array() (ht []HashType) {
}
// Count returns the number of hash types in the set
func (h HashSet) Count() int {
func (h Set) Count() int {
if int(h) == 0 {
return 0
}
@@ -281,7 +281,7 @@ func (h HashSet) Count() int {
// String returns a string representation of the hash set.
// The function will panic if it contains an unknown type.
func (h HashSet) String() string {
func (h Set) String() string {
a := h.Array()
var r []string
for _, v := range a {
@@ -289,3 +289,12 @@ func (h HashSet) String() string {
}
return "[" + strings.Join(r, ", ") + "]"
}
// Equals checks to see if src == dst, but ignores empty strings
// and returns true if either is empty.
func Equals(src, dst string) bool {
if src == "" || dst == "" {
return true
}
return src == dst
}
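For illustration (not part of this commit): the renamed API in use, hashing a stream with every supported type and then with a single one.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/ncw/rclone/fs/hash"
)

func main() {
	// All supported hashes in one pass over the stream.
	sums, err := hash.Stream(strings.NewReader("hello, world"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("md5:", sums[hash.HashMD5])

	// Restrict the work to a single type with a MultiHasher.
	mh, err := hash.NewMultiHasherTypes(hash.NewHashSet(hash.HashSHA1))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := mh.Write([]byte("hello, world")); err != nil {
		log.Fatal(err)
	}
	fmt.Println("sha1:", mh.Sums()[hash.HashSHA1])
}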

View File

@@ -1,89 +1,89 @@
package fs_test
package hash_test
import (
"bytes"
"io"
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestHashSet(t *testing.T) {
var h fs.HashSet
var h hash.Set
assert.Equal(t, 0, h.Count())
a := h.Array()
assert.Len(t, a, 0)
h = h.Add(fs.HashMD5)
h = h.Add(hash.HashMD5)
assert.Equal(t, 1, h.Count())
assert.Equal(t, fs.HashMD5, h.GetOne())
assert.Equal(t, hash.HashMD5, h.GetOne())
a = h.Array()
assert.Len(t, a, 1)
assert.Equal(t, a[0], fs.HashMD5)
assert.Equal(t, a[0], hash.HashMD5)
// Test overlap, with all hashes
h = h.Overlap(fs.SupportedHashes)
h = h.Overlap(hash.SupportedHashes)
assert.Equal(t, 1, h.Count())
assert.Equal(t, fs.HashMD5, h.GetOne())
assert.True(t, h.SubsetOf(fs.SupportedHashes))
assert.True(t, h.SubsetOf(fs.NewHashSet(fs.HashMD5)))
assert.Equal(t, hash.HashMD5, h.GetOne())
assert.True(t, h.SubsetOf(hash.SupportedHashes))
assert.True(t, h.SubsetOf(hash.NewHashSet(hash.HashMD5)))
h = h.Add(fs.HashSHA1)
h = h.Add(hash.HashSHA1)
assert.Equal(t, 2, h.Count())
one := h.GetOne()
if !(one == fs.HashMD5 || one == fs.HashSHA1) {
if !(one == hash.HashMD5 || one == hash.HashSHA1) {
t.Fatalf("expected to be either MD5 or SHA1, got %v", one)
}
assert.True(t, h.SubsetOf(fs.SupportedHashes))
assert.False(t, h.SubsetOf(fs.NewHashSet(fs.HashMD5)))
assert.False(t, h.SubsetOf(fs.NewHashSet(fs.HashSHA1)))
assert.True(t, h.SubsetOf(fs.NewHashSet(fs.HashMD5, fs.HashSHA1)))
assert.True(t, h.SubsetOf(hash.SupportedHashes))
assert.False(t, h.SubsetOf(hash.NewHashSet(hash.HashMD5)))
assert.False(t, h.SubsetOf(hash.NewHashSet(hash.HashSHA1)))
assert.True(t, h.SubsetOf(hash.NewHashSet(hash.HashMD5, hash.HashSHA1)))
a = h.Array()
assert.Len(t, a, 2)
ol := h.Overlap(fs.NewHashSet(fs.HashMD5))
ol := h.Overlap(hash.NewHashSet(hash.HashMD5))
assert.Equal(t, 1, ol.Count())
assert.True(t, ol.Contains(fs.HashMD5))
assert.False(t, ol.Contains(fs.HashSHA1))
assert.True(t, ol.Contains(hash.HashMD5))
assert.False(t, ol.Contains(hash.HashSHA1))
ol = h.Overlap(fs.NewHashSet(fs.HashMD5, fs.HashSHA1))
ol = h.Overlap(hash.NewHashSet(hash.HashMD5, hash.HashSHA1))
assert.Equal(t, 2, ol.Count())
assert.True(t, ol.Contains(fs.HashMD5))
assert.True(t, ol.Contains(fs.HashSHA1))
assert.True(t, ol.Contains(hash.HashMD5))
assert.True(t, ol.Contains(hash.HashSHA1))
}
type hashTest struct {
input []byte
output map[fs.HashType]string
output map[hash.Type]string
}
var hashTestSet = []hashTest{
{
input: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
output: map[fs.HashType]string{
fs.HashMD5: "bf13fc19e5151ac57d4252e0e0f87abe",
fs.HashSHA1: "3ab6543c08a75f292a5ecedac87ec41642d12166",
fs.HashDropbox: "214d2fcf3566e94c99ad2f59bd993daca46d8521a0c447adf4b324f53fddc0c7",
output: map[hash.Type]string{
hash.HashMD5: "bf13fc19e5151ac57d4252e0e0f87abe",
hash.HashSHA1: "3ab6543c08a75f292a5ecedac87ec41642d12166",
hash.HashDropbox: "214d2fcf3566e94c99ad2f59bd993daca46d8521a0c447adf4b324f53fddc0c7",
},
},
// Empty data set
{
input: []byte{},
output: map[fs.HashType]string{
fs.HashMD5: "d41d8cd98f00b204e9800998ecf8427e",
fs.HashSHA1: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
fs.HashDropbox: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
output: map[hash.Type]string{
hash.HashMD5: "d41d8cd98f00b204e9800998ecf8427e",
hash.HashSHA1: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
hash.HashDropbox: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
},
},
}
func TestMultiHasher(t *testing.T) {
for _, test := range hashTestSet {
mh := fs.NewMultiHasher()
mh := hash.NewMultiHasher()
n, err := io.Copy(mh, bytes.NewBuffer(test.input))
require.NoError(t, err)
assert.Len(t, test.input, int(n))
@@ -103,9 +103,9 @@ func TestMultiHasher(t *testing.T) {
}
func TestMultiHasherTypes(t *testing.T) {
h := fs.HashSHA1
h := hash.HashSHA1
for _, test := range hashTestSet {
mh, err := fs.NewMultiHasherTypes(fs.NewHashSet(h))
mh, err := hash.NewMultiHasherTypes(hash.NewHashSet(h))
if err != nil {
t.Fatal(err)
}
@@ -120,7 +120,7 @@ func TestMultiHasherTypes(t *testing.T) {
func TestHashStream(t *testing.T) {
for _, test := range hashTestSet {
sums, err := fs.HashStream(bytes.NewBuffer(test.input))
sums, err := hash.Stream(bytes.NewBuffer(test.input))
require.NoError(t, err)
for k, v := range sums {
expect, ok := test.output[k]
@@ -137,9 +137,9 @@ func TestHashStream(t *testing.T) {
}
func TestHashStreamTypes(t *testing.T) {
h := fs.HashSHA1
h := hash.HashSHA1
for _, test := range hashTestSet {
sums, err := fs.HashStreamTypes(bytes.NewBuffer(test.input), fs.NewHashSet(h))
sums, err := hash.StreamTypes(bytes.NewBuffer(test.input), hash.NewHashSet(h))
require.NoError(t, err)
assert.Len(t, sums, 1)
assert.Equal(t, sums[h], test.output[h])
@@ -147,17 +147,17 @@ func TestHashStreamTypes(t *testing.T) {
}
func TestHashSetStringer(t *testing.T) {
h := fs.NewHashSet(fs.HashSHA1, fs.HashMD5, fs.HashDropbox)
h := hash.NewHashSet(hash.HashSHA1, hash.HashMD5, hash.HashDropbox)
assert.Equal(t, h.String(), "[MD5, SHA-1, DropboxHash]")
h = fs.NewHashSet(fs.HashSHA1)
h = hash.NewHashSet(hash.HashSHA1)
assert.Equal(t, h.String(), "[SHA-1]")
h = fs.NewHashSet()
h = hash.NewHashSet()
assert.Equal(t, h.String(), "[]")
}
func TestHashStringer(t *testing.T) {
h := fs.HashMD5
h := hash.HashMD5
assert.Equal(t, h.String(), "MD5")
h = fs.HashNone
h = hash.HashNone
assert.Equal(t, h.String(), "None")
}

View File

@@ -1,25 +0,0 @@
// HTTP parts pre go1.7
//+build !go1.7
package fs
import (
"net"
"net/http"
)
// dial with timeouts
func (ci *ConfigInfo) dialTimeout(network, address string) (net.Conn, error) {
dialer := ci.NewDialer()
c, err := dialer.Dial(network, address)
if err != nil {
return c, err
}
return newTimeoutConn(c, ci.Timeout), nil
}
// Initialise the http.Transport for pre go1.7
func (ci *ConfigInfo) initTransport(t *http.Transport) {
t.Dial = ci.dialTimeout
}

fs/list/list.go Normal file
View File

@@ -0,0 +1,102 @@
// Package list contains list functions
package list
import (
"sort"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/filter"
"github.com/pkg/errors"
)
// DirSorted reads Object and *Dir into entries for the given Fs.
//
// dir is the start directory, "" for root
//
// If includeAll is specified all files will be added, otherwise only
// files and directories passing the filter will be added.
//
// Files will be returned in sorted order
func DirSorted(f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
// Get unfiltered entries from the fs
entries, err = f.List(dir)
if err != nil {
return nil, err
}
// This should happen only if the exclude file lives in the
// starting directory, otherwise ListDirSorted should not be
// called.
if !includeAll && filter.Active.ListContainsExcludeFile(entries) {
fs.Debugf(dir, "Excluded from sync (and deletion)")
return nil, nil
}
return filterAndSortDir(entries, includeAll, dir, filter.Active.IncludeObject, filter.Active.IncludeDirectory(f))
}
// filter (if required) and check the entries, then sort them
func filterAndSortDir(entries fs.DirEntries, includeAll bool, dir string,
IncludeObject func(o fs.Object) bool,
IncludeDirectory func(remote string) (bool, error)) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
prefix := ""
if dir != "" {
prefix = dir + "/"
}
for _, entry := range entries {
ok := true
// check includes and types
switch x := entry.(type) {
case fs.Object:
// Make sure we don't delete excluded files if not required
if !includeAll && !IncludeObject(x) {
ok = false
fs.Debugf(x, "Excluded from sync (and deletion)")
}
case fs.Directory:
if !includeAll {
include, err := IncludeDirectory(x.Remote())
if err != nil {
return nil, err
}
if !include {
ok = false
fs.Debugf(x, "Excluded from sync (and deletion)")
}
}
default:
return nil, errors.Errorf("unknown object type %T", entry)
}
// check that the remote name belongs in this directory
remote := entry.Remote()
switch {
case !ok:
// ignore
case !strings.HasPrefix(remote, prefix):
ok = false
fs.Errorf(entry, "Entry doesn't belong in directory %q (too short) - ignoring", dir)
case remote == prefix:
ok = false
fs.Errorf(entry, "Entry doesn't belong in directory %q (same as directory) - ignoring", dir)
case strings.ContainsRune(remote[len(prefix):], '/'):
ok = false
fs.Errorf(entry, "Entry doesn't belong in directory %q (contains subdir) - ignoring", dir)
default:
// ok
}
if ok {
newEntries = append(newEntries, entry)
}
}
entries = newEntries
// Sort the directory entries by Remote
//
// We use a stable sort here just in case there are
// duplicates. Assuming the remote delivers the entries in a
// consistent order, this will give the best user experience
// in syncing as it will use the first entry for the sync
// comparison.
sort.Stable(entries)
return entries, nil
}
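An illustrative caller of the new package (not from the diff); "remote:path" is a placeholder remote.

package main

import (
	"log"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/list"
)

func main() {
	f, err := fs.NewFs("remote:path") // placeholder remote
	if err != nil {
		log.Fatal(err)
	}
	// includeAll=false applies the globally active filter.
	entries, err := list.DirSorted(f, false, "")
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		log.Println(entry.Remote())
	}
}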

fs/list/list_test.go Normal file
View File

@@ -0,0 +1,104 @@
package list
import (
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/mockdir"
"github.com/ncw/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// NB integration tests for DirSorted are in
// fs/operations/listdirsorted_test.go
func TestFilterAndSortIncludeAll(t *testing.T) {
da := mockdir.New("a")
oA := mockobject.Object("A")
db := mockdir.New("b")
oB := mockobject.Object("B")
dc := mockdir.New("c")
oC := mockobject.Object("C")
dd := mockdir.New("d")
oD := mockobject.Object("D")
entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
includeObject := func(o fs.Object) bool {
return o != oB
}
includeDirectory := func(remote string) (bool, error) {
return remote != "c", nil
}
// no filter
newEntries, err := filterAndSortDir(entries, true, "", includeObject, includeDirectory)
require.NoError(t, err)
assert.Equal(t,
newEntries,
fs.DirEntries{oA, oB, oC, oD, da, db, dc, dd},
)
// filter
newEntries, err = filterAndSortDir(entries, false, "", includeObject, includeDirectory)
require.NoError(t, err)
assert.Equal(t,
newEntries,
fs.DirEntries{oA, oC, oD, da, db, dd},
)
}
func TestFilterAndSortCheckDir(t *testing.T) {
// Check the different kinds of error when listing "dir"
da := mockdir.New("dir/")
oA := mockobject.Object("diR/a")
db := mockdir.New("dir/b")
oB := mockobject.Object("dir/B/sub")
dc := mockdir.New("dir/c")
oC := mockobject.Object("dir/C")
dd := mockdir.New("dir/d")
oD := mockobject.Object("dir/D")
entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
newEntries, err := filterAndSortDir(entries, true, "dir", nil, nil)
require.NoError(t, err)
assert.Equal(t,
newEntries,
fs.DirEntries{oC, oD, db, dc, dd},
)
}
func TestFilterAndSortCheckDirRoot(t *testing.T) {
// Check the different kinds of error when listing the root ""
da := mockdir.New("")
oA := mockobject.Object("A")
db := mockdir.New("b")
oB := mockobject.Object("B/sub")
dc := mockdir.New("c")
oC := mockobject.Object("C")
dd := mockdir.New("d")
oD := mockobject.Object("D")
entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
newEntries, err := filterAndSortDir(entries, true, "", nil, nil)
require.NoError(t, err)
assert.Equal(t,
newEntries,
fs.DirEntries{oA, oC, oD, db, dc, dd},
)
}
type unknownDirEntry string
func (o unknownDirEntry) String() string { return string(o) }
func (o unknownDirEntry) Remote() string { return string(o) }
func (o unknownDirEntry) ModTime() (t time.Time) { return t }
func (o unknownDirEntry) Size() int64 { return 0 }
func TestFilterAndSortUnknown(t *testing.T) {
// Check that an unknown entry produces an error
da := mockdir.New("")
oA := mockobject.Object("A")
ub := unknownDirEntry("b")
oB := mockobject.Object("B/sub")
entries := fs.DirEntries{da, oA, ub, oB}
newEntries, err := filterAndSortDir(entries, true, "", nil, nil)
assert.Error(t, err, "error")
assert.Nil(t, newEntries)
}

fs/log.go
View File

@@ -1,17 +1,10 @@
// Logging for rclone
package fs
import (
"fmt"
"log"
"os"
"reflect"
"runtime"
"strings"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
// LogLevel describes rclone's logs. These are a subset of the syslog log levels.
@@ -74,35 +67,25 @@ func (l *LogLevel) Type() string {
return "string"
}
// Check it satisfies the interface
var _ pflag.Value = (*LogLevel)(nil)
// Flags
var (
logFile = StringP("log-file", "", "", "Log everything to this file")
useSyslog = BoolP("syslog", "", false, "Use Syslog for logging")
syslogFacility = StringP("syslog-facility", "", "DAEMON", "Facility for syslog, eg KERN,USER,...")
)
// logPrint sends the text to the logger of level
var logPrint = func(level LogLevel, text string) {
// LogPrint sends the text to the logger of level
var LogPrint = func(level LogLevel, text string) {
text = fmt.Sprintf("%-6s: %s", level, text)
log.Print(text)
}
// logPrintf produces a log string from the arguments passed in
func logPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
// LogPrintf produces a log string from the arguments passed in
func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
out := fmt.Sprintf(text, args...)
if o != nil {
out = fmt.Sprintf("%v: %s", o, out)
}
logPrint(level, out)
LogPrint(level, out)
}
// LogLevelPrintf writes logs at the given level
func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= level {
logPrintf(level, o, text, args...)
LogPrintf(level, o, text, args...)
}
}
@@ -110,7 +93,7 @@ func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interfac
// should always be seen by the user.
func Errorf(o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= LogLevelError {
logPrintf(LogLevelError, o, text, args...)
LogPrintf(LogLevelError, o, text, args...)
}
}
@@ -121,7 +104,7 @@ func Errorf(o interface{}, text string, args ...interface{}) {
// out with the -q flag.
func Logf(o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= LogLevelNotice {
logPrintf(LogLevelNotice, o, text, args...)
LogPrintf(LogLevelNotice, o, text, args...)
}
}
@@ -130,7 +113,7 @@ func Logf(o interface{}, text string, args ...interface{}) {
// appear with the -v flag.
func Infof(o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= LogLevelInfo {
logPrintf(LogLevelInfo, o, text, args...)
LogPrintf(LogLevelInfo, o, text, args...)
}
}
@@ -138,75 +121,15 @@ func Infof(o interface{}, text string, args ...interface{}) {
// debug only. The user has to specify -vv to see this.
func Debugf(o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= LogLevelDebug {
logPrintf(LogLevelDebug, o, text, args...)
LogPrintf(LogLevelDebug, o, text, args...)
}
}
// fnName returns the name of the function two frames up the call stack
func fnName() string {
pc, _, _, ok := runtime.Caller(2)
name := "*Unknown*"
if ok {
name = runtime.FuncForPC(pc).Name()
dot := strings.LastIndex(name, ".")
if dot >= 0 {
name = name[dot+1:]
}
}
return name
}
// Trace logs the entry and exit of the calling function at debug level
//
// It is designed to be used in a defer statement so it returns a
// function that logs the exit parameters.
//
// Any pointers in the exit function will be dereferenced
func Trace(o interface{}, format string, a ...interface{}) func(string, ...interface{}) {
if Config.LogLevel < LogLevelDebug {
return func(format string, a ...interface{}) {}
}
name := fnName()
logPrintf(LogLevelDebug, o, name+": "+format, a...)
return func(format string, a ...interface{}) {
for i := range a {
// read the values of the pointed to items
typ := reflect.TypeOf(a[i])
if typ.Kind() == reflect.Ptr {
value := reflect.ValueOf(a[i])
if value.IsNil() {
a[i] = nil
} else {
pointedToValue := reflect.Indirect(value)
a[i] = pointedToValue.Interface()
}
}
}
logPrintf(LogLevelDebug, o, ">"+name+": "+format, a...)
}
}
// InitLogging starts the logging as per the command line flags
func InitLogging() {
// Log file output
if *logFile != "" {
f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
if err != nil {
log.Fatalf("Failed to open log file: %v", err)
}
_, err = f.Seek(0, os.SEEK_END)
if err != nil {
Errorf(nil, "Failed to seek log file to end: %v", err)
}
log.SetOutput(f)
redirectStderr(f)
}
// Syslog output
if *useSyslog {
if *logFile != "" {
log.Fatalf("Can't use --syslog and --log-file together")
}
startSysLog()
// LogDirName returns an object for the logger: the directory name, or,
// for the root directory which would normally be "", the Fs itself
func LogDirName(f Fs, dir string) interface{} {
if dir != "" {
return dir
}
return f
}
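A hypothetical caller sketch (not part of this commit) showing why LogDirName is useful — when dir is the root "" the Fs itself is logged rather than an empty remote name:

package main

import "github.com/ncw/rclone/fs"

// logListing logs a directory listing against a sensible object.
func logListing(f fs.Fs, dir string) {
	// For dir == "" this logs against f (the remote's name and root)
	// instead of producing a log line with an empty prefix.
	fs.Infof(fs.LogDirName(f, dir), "listing directory")
}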

89
fs/log/log.go Normal file
View File

@@ -0,0 +1,89 @@
// Package log provides logging for rclone
package log
import (
"log"
"os"
"reflect"
"runtime"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/flags"
)
// Flags
var (
logFile = flags.StringP("log-file", "", "", "Log everything to this file")
useSyslog = flags.BoolP("syslog", "", false, "Use Syslog for logging")
syslogFacility = flags.StringP("syslog-facility", "", "DAEMON", "Facility for syslog, eg KERN,USER,...")
)
// fnName returns the name of the function two frames up the call stack
func fnName() string {
pc, _, _, ok := runtime.Caller(2)
name := "*Unknown*"
if ok {
name = runtime.FuncForPC(pc).Name()
dot := strings.LastIndex(name, ".")
if dot >= 0 {
name = name[dot+1:]
}
}
return name
}
// Trace logs the entry and exit of the calling function at debug level
//
// It is designed to be used in a defer statement so it returns a
// function that logs the exit parameters.
//
// Any pointers in the exit function will be dereferenced
func Trace(o interface{}, format string, a ...interface{}) func(string, ...interface{}) {
if fs.Config.LogLevel < fs.LogLevelDebug {
return func(format string, a ...interface{}) {}
}
name := fnName()
fs.LogPrintf(fs.LogLevelDebug, o, name+": "+format, a...)
return func(format string, a ...interface{}) {
for i := range a {
// read the values of the pointed to items
typ := reflect.TypeOf(a[i])
if typ.Kind() == reflect.Ptr {
value := reflect.ValueOf(a[i])
if value.IsNil() {
a[i] = nil
} else {
pointedToValue := reflect.Indirect(value)
a[i] = pointedToValue.Interface()
}
}
}
fs.LogPrintf(fs.LogLevelDebug, o, ">"+name+": "+format, a...)
}
}
// InitLogging starts the logging as per the command line flags
func InitLogging() {
// Log file output
if *logFile != "" {
f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
if err != nil {
log.Fatalf("Failed to open log file: %v", err)
}
_, err = f.Seek(0, os.SEEK_END)
if err != nil {
fs.Errorf(nil, "Failed to seek log file to end: %v", err)
}
log.SetOutput(f)
redirectStderr(f)
}
// Syslog output
if *useSyslog {
if *logFile != "" {
log.Fatalf("Can't use --syslog and --log-file together")
}
startSysLog()
}
}
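The deferred-call idiom Trace is built for, as a short sketch (assumes the user ran with -vv so fs.Config.LogLevel is debug; findObject is hypothetical):

package main

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/log"
)

// findObject shows the Trace idiom: the outer call logs entry now,
// the returned function is deferred and logs exit, with the o and
// err pointers dereferenced to their final values.
func findObject(f fs.Fs, remote string) (o fs.Object, err error) {
	defer log.Trace(f, "remote=%q", remote)("o=%v, err=%v", &o, &err)
	return f.NewObject(remote)
}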

View File

@@ -2,11 +2,15 @@
// +build !windows,!darwin,!dragonfly,!freebsd,!linux,!nacl,!netbsd,!openbsd
package fs
package log
import "os"
import (
"os"
"github.com/ncw/rclone/fs"
)
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
Errorf(nil, "Can't redirect stderr to file")
fs.Errorf(nil, "Can't redirect stderr to file")
}

View File

@@ -2,7 +2,7 @@
// +build !windows,!solaris,!plan9
package fs
package log
import (
"log"

View File

@@ -6,7 +6,7 @@
// +build windows
package fs
package log
import (
"log"

View File

@@ -2,7 +2,7 @@
// +build windows nacl plan9
package fs
package log
import (
"log"

View File

@@ -2,13 +2,15 @@
// +build !windows,!nacl,!plan9
package fs
package log
import (
"log"
"log/syslog"
"os"
"path"
"github.com/ncw/rclone/fs"
)
var (
@@ -41,23 +43,23 @@ func startSysLog() bool {
}
log.SetFlags(0)
log.SetOutput(w)
logPrint = func(level LogLevel, text string) {
fs.LogPrint = func(level fs.LogLevel, text string) {
switch level {
case LogLevelEmergency:
case fs.LogLevelEmergency:
_ = w.Emerg(text)
case LogLevelAlert:
case fs.LogLevelAlert:
_ = w.Alert(text)
case LogLevelCritical:
case fs.LogLevelCritical:
_ = w.Crit(text)
case LogLevelError:
case fs.LogLevelError:
_ = w.Err(text)
case LogLevelWarning:
case fs.LogLevelWarning:
_ = w.Warning(text)
case LogLevelNotice:
case fs.LogLevelNotice:
_ = w.Notice(text)
case LogLevelInfo:
case fs.LogLevelInfo:
_ = w.Info(text)
case LogLevelDebug:
case fs.LogLevelDebug:
_ = w.Debug(text)
}
}

6
fs/log_test.go Normal file
View File

@@ -0,0 +1,6 @@
package fs
import "github.com/spf13/pflag"
// Check it satisfies the interface
var _ pflag.Value = (*LogLevel)(nil)

View File

@@ -1,4 +1,5 @@
package fs
// Package march traverses two directories in lock step
package march
import (
"path"
@@ -6,37 +7,42 @@ import (
"strings"
"sync"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/list"
"github.com/ncw/rclone/fs/walk"
"golang.org/x/net/context"
"golang.org/x/text/unicode/norm"
)
// march traverses two Fs simultaneously, calling walker for each match
type march struct {
// March holds the data used to traverse two Fs simultaneously,
// calling callback for each match
type March struct {
// parameters
ctx context.Context
fdst Fs
fsrc Fs
fdst fs.Fs
fsrc fs.Fs
dir string
callback marcher
callback Marcher
// internal state
srcListDir listDirFn // function to call to list a directory in the src
dstListDir listDirFn // function to call to list a directory in the dst
transforms []matchTransformFn
}
// marcher is called on each match
type marcher interface {
// Marcher is called on each match
type Marcher interface {
// SrcOnly is called for a DirEntry found only in the source
SrcOnly(src DirEntry) (recurse bool)
SrcOnly(src fs.DirEntry) (recurse bool)
// DstOnly is called for a DirEntry found only in the destination
DstOnly(dst DirEntry) (recurse bool)
DstOnly(dst fs.DirEntry) (recurse bool)
// Match is called for a DirEntry found both in the source and destination
Match(dst, src DirEntry) (recurse bool)
Match(dst, src fs.DirEntry) (recurse bool)
}
// newMarch sets up a march over fsrc and fdst, calling back callback for each match
func newMarch(ctx context.Context, fdst, fsrc Fs, dir string, callback marcher) *march {
m := &march{
// New sets up a march over fsrc and fdst, calling back callback for each match
func New(ctx context.Context, fdst, fsrc fs.Fs, dir string, callback Marcher) *March {
m := &March{
ctx: ctx,
fdst: fdst,
fsrc: fsrc,
@@ -44,7 +50,7 @@ func newMarch(ctx context.Context, fdst, fsrc Fs, dir string, callback marcher)
callback: callback,
}
m.srcListDir = m.makeListDir(fsrc, false)
m.dstListDir = m.makeListDir(fdst, Config.Filter.DeleteExcluded)
m.dstListDir = m.makeListDir(fdst, filter.Active.Opt.DeleteExcluded)
// Now create the matching transform
// ..normalise the UTF8 first
m.transforms = append(m.transforms, norm.NFC.String)
@@ -61,26 +67,26 @@ func newMarch(ctx context.Context, fdst, fsrc Fs, dir string, callback marcher)
}
// list a directory into entries, err
type listDirFn func(dir string) (entries DirEntries, err error)
type listDirFn func(dir string) (entries fs.DirEntries, err error)
// makeListDir makes a listing function for the given fs and includeAll flags
func (m *march) makeListDir(f Fs, includeAll bool) listDirFn {
if !Config.UseListR || f.Features().ListR == nil {
return func(dir string) (entries DirEntries, err error) {
return ListDirSorted(f, includeAll, dir)
func (m *March) makeListDir(f fs.Fs, includeAll bool) listDirFn {
if !fs.Config.UseListR || f.Features().ListR == nil {
return func(dir string) (entries fs.DirEntries, err error) {
return list.DirSorted(f, includeAll, dir)
}
}
var (
mu sync.Mutex
started bool
dirs DirTree
dirs walk.DirTree
dirsErr error
)
return func(dir string) (entries DirEntries, err error) {
return func(dir string) (entries fs.DirEntries, err error) {
mu.Lock()
defer mu.Unlock()
if !started {
dirs, dirsErr = NewDirTree(f, m.dir, includeAll, Config.MaxDepth)
dirs, dirsErr = walk.NewDirTree(f, m.dir, includeAll, fs.Config.MaxDepth)
started = true
}
if dirsErr != nil {
@@ -88,7 +94,7 @@ func (m *march) makeListDir(f Fs, includeAll bool) listDirFn {
}
entries, ok := dirs[dir]
if !ok {
err = ErrorDirNotFound
err = fs.ErrorDirNotFound
} else {
delete(dirs, dir)
}
@@ -106,22 +112,22 @@ type listDirJob struct {
noDst bool
}
// run starts the matching process off
func (m *march) run() {
srcDepth := Config.MaxDepth
// Run starts the matching process off
func (m *March) Run() {
srcDepth := fs.Config.MaxDepth
if srcDepth < 0 {
srcDepth = MaxLevel
srcDepth = fs.MaxLevel
}
dstDepth := srcDepth
if Config.Filter.DeleteExcluded {
dstDepth = MaxLevel
if filter.Active.Opt.DeleteExcluded {
dstDepth = fs.MaxLevel
}
// Start some directory listing go routines
var wg sync.WaitGroup // sync closing of go routines
var traversing sync.WaitGroup // running directory traversals
in := make(chan listDirJob, Config.Checkers)
for i := 0; i < Config.Checkers; i++ {
in := make(chan listDirJob, fs.Config.Checkers)
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
@@ -164,7 +170,7 @@ func (m *march) run() {
}
// Check to see if the context has been cancelled
func (m *march) aborting() bool {
func (m *March) aborting() bool {
select {
case <-m.ctx.Done():
return true
@@ -175,7 +181,7 @@ func (m *march) aborting() bool {
// matchEntry is an entry plus transformed name
type matchEntry struct {
entry DirEntry
entry fs.DirEntry
leaf string
name string
}
@@ -215,7 +221,7 @@ func (es matchEntries) sort() {
}
// newMatchEntries makes a matchEntries from a list of DirEntries
func newMatchEntries(entries DirEntries, transforms []matchTransformFn) matchEntries {
func newMatchEntries(entries fs.DirEntries, transforms []matchTransformFn) matchEntries {
es := make(matchEntries, len(entries))
for i := range es {
es[i].entry = entries[i]
@@ -232,7 +238,7 @@ func newMatchEntries(entries DirEntries, transforms []matchTransformFn) matchEnt
// matchPair is a matched pair of direntries returned by matchListings
type matchPair struct {
src, dst DirEntry
src, dst fs.DirEntry
}
// matchTransformFn converts a name into a form which is used for
@@ -247,11 +253,11 @@ type matchTransformFn func(name string) string
// Into matches go matchPairs of src and dst which have the same name
//
// This checks for duplicates and checks the list is sorted.
func matchListings(srcListEntries, dstListEntries DirEntries, transforms []matchTransformFn) (srcOnly DirEntries, dstOnly DirEntries, matches []matchPair) {
func matchListings(srcListEntries, dstListEntries fs.DirEntries, transforms []matchTransformFn) (srcOnly fs.DirEntries, dstOnly fs.DirEntries, matches []matchPair) {
srcList := newMatchEntries(srcListEntries, transforms)
dstList := newMatchEntries(dstListEntries, transforms)
for iSrc, iDst := 0, 0; ; iSrc, iDst = iSrc+1, iDst+1 {
var src, dst DirEntry
var src, dst fs.DirEntry
var srcName, dstName string
if iSrc < len(srcList) {
src = srcList[iSrc].entry
@@ -267,7 +273,7 @@ func matchListings(srcListEntries, dstListEntries DirEntries, transforms []match
if src != nil && iSrc > 0 {
prev := srcList[iSrc-1].name
if srcName == prev {
Logf(src, "Duplicate %s found in source - ignoring", DirEntryType(src))
fs.Logf(src, "Duplicate %s found in source - ignoring", fs.DirEntryType(src))
iDst-- // ignore the src and retry the dst
continue
} else if srcName < prev {
@@ -278,7 +284,7 @@ func matchListings(srcListEntries, dstListEntries DirEntries, transforms []match
if dst != nil && iDst > 0 {
prev := dstList[iDst-1].name
if dstName == prev {
Logf(dst, "Duplicate %s found in destination - ignoring", DirEntryType(dst))
fs.Logf(dst, "Duplicate %s found in destination - ignoring", fs.DirEntryType(dst))
iSrc-- // ignore the dst and retry the src
continue
} else if dstName < prev {
@@ -315,9 +321,9 @@ func matchListings(srcListEntries, dstListEntries DirEntries, transforms []match
// more jobs
//
// returns errors using processError
func (m *march) processJob(job listDirJob) (jobs []listDirJob) {
func (m *March) processJob(job listDirJob) (jobs []listDirJob) {
var (
srcList, dstList DirEntries
srcList, dstList fs.DirEntries
srcListErr, dstListErr error
wg sync.WaitGroup
)
@@ -341,15 +347,15 @@ func (m *march) processJob(job listDirJob) (jobs []listDirJob) {
// Wait for listings to complete and report errors
wg.Wait()
if srcListErr != nil {
Errorf(job.srcRemote, "error reading source directory: %v", srcListErr)
Stats.Error(srcListErr)
fs.Errorf(job.srcRemote, "error reading source directory: %v", srcListErr)
fs.CountError(srcListErr)
return nil
}
if dstListErr == ErrorDirNotFound {
if dstListErr == fs.ErrorDirNotFound {
// Copy the stuff anyway
} else if dstListErr != nil {
Errorf(job.dstRemote, "error reading destination directory: %v", dstListErr)
Stats.Error(dstListErr)
fs.Errorf(job.dstRemote, "error reading destination directory: %v", dstListErr)
fs.CountError(dstListErr)
return nil
}
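To make the new exported API concrete, a minimal sketch of implementing Marcher and driving a traversal (printMarcher and compare are hypothetical; assumes the filter package is initialised with its defaults):

package main

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/march"
	"golang.org/x/net/context"
)

// printMarcher logs which side each entry was found on.
type printMarcher struct{}

func (printMarcher) SrcOnly(src fs.DirEntry) bool {
	fs.Logf(src, "only in source")
	return true // recurse into sub directories
}

func (printMarcher) DstOnly(dst fs.DirEntry) bool {
	fs.Logf(dst, "only in destination")
	return true
}

func (printMarcher) Match(dst, src fs.DirEntry) bool {
	fs.Logf(src, "present in both")
	return true
}

// compare walks fdst and fsrc in lock step from the root.
func compare(fdst, fsrc fs.Fs) {
	march.New(context.Background(), fdst, fsrc, "", printMarcher{}).Run()
}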

View File

@@ -1,23 +1,25 @@
// Internal tests for march
package fs
package march
import (
"strings"
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
)
func TestNewMatchEntries(t *testing.T) {
var (
a = mockObject("path/a")
A = mockObject("path/A")
B = mockObject("path/B")
c = mockObject("path/c")
a = mockobject.Object("path/a")
A = mockobject.Object("path/A")
B = mockobject.Object("path/B")
c = mockobject.Object("path/c")
)
es := newMatchEntries(DirEntries{a, A, B, c}, nil)
es := newMatchEntries(fs.DirEntries{a, A, B, c}, nil)
assert.Equal(t, es, matchEntries{
{name: "A", leaf: "A", entry: A},
{name: "B", leaf: "B", entry: B},
@@ -25,7 +27,7 @@ func TestNewMatchEntries(t *testing.T) {
{name: "c", leaf: "c", entry: c},
})
es = newMatchEntries(DirEntries{a, A, B, c}, []matchTransformFn{strings.ToLower})
es = newMatchEntries(fs.DirEntries{a, A, B, c}, []matchTransformFn{strings.ToLower})
assert.Equal(t, es, matchEntries{
{name: "a", leaf: "A", entry: A},
{name: "a", leaf: "a", entry: a},
@@ -36,45 +38,45 @@ func TestNewMatchEntries(t *testing.T) {
func TestMatchListings(t *testing.T) {
var (
a = mockObject("a")
A = mockObject("A")
b = mockObject("b")
c = mockObject("c")
d = mockObject("d")
a = mockobject.Object("a")
A = mockobject.Object("A")
b = mockobject.Object("b")
c = mockobject.Object("c")
d = mockobject.Object("d")
)
for _, test := range []struct {
what string
input DirEntries // pairs of input src, dst
srcOnly DirEntries
dstOnly DirEntries
input fs.DirEntries // pairs of input src, dst
srcOnly fs.DirEntries
dstOnly fs.DirEntries
matches []matchPair // pairs of output
transforms []matchTransformFn
}{
{
what: "only src or dst",
input: DirEntries{
input: fs.DirEntries{
a, nil,
b, nil,
c, nil,
d, nil,
},
srcOnly: DirEntries{
srcOnly: fs.DirEntries{
a, b, c, d,
},
},
{
what: "typical sync #1",
input: DirEntries{
input: fs.DirEntries{
a, nil,
b, b,
nil, c,
nil, d,
},
srcOnly: DirEntries{
srcOnly: fs.DirEntries{
a,
},
dstOnly: DirEntries{
dstOnly: fs.DirEntries{
c, d,
},
matches: []matchPair{
@@ -83,13 +85,13 @@ func TestMatchListings(t *testing.T) {
},
{
what: "typical sync #2",
input: DirEntries{
input: fs.DirEntries{
a, a,
b, b,
nil, c,
d, d,
},
dstOnly: DirEntries{
dstOnly: fs.DirEntries{
c,
},
matches: []matchPair{
@@ -100,7 +102,7 @@ func TestMatchListings(t *testing.T) {
},
{
what: "One duplicate",
input: DirEntries{
input: fs.DirEntries{
A, A,
a, a,
a, nil,
@@ -114,7 +116,7 @@ func TestMatchListings(t *testing.T) {
},
{
what: "Two duplicates",
input: DirEntries{
input: fs.DirEntries{
a, a,
a, a,
a, nil,
@@ -125,7 +127,7 @@ func TestMatchListings(t *testing.T) {
},
{
what: "Case insensitive duplicate - no transform",
input: DirEntries{
input: fs.DirEntries{
a, a,
A, A,
},
@@ -136,7 +138,7 @@ func TestMatchListings(t *testing.T) {
},
{
what: "Case insensitive duplicate - transform to lower case",
input: DirEntries{
input: fs.DirEntries{
a, a,
A, A,
},
@@ -146,7 +148,7 @@ func TestMatchListings(t *testing.T) {
transforms: []matchTransformFn{strings.ToLower},
},
} {
var srcList, dstList DirEntries
var srcList, dstList fs.DirEntries
for i := 0; i < len(test.input); i += 2 {
src, dst := test.input[i], test.input[i+1]
if src != nil {

30
fs/mimetype.go Normal file
View File

@@ -0,0 +1,30 @@
package fs
import (
"mime"
"path"
"strings"
)
// MimeTypeFromName returns a guess at the mime type from the name
func MimeTypeFromName(remote string) (mimeType string) {
mimeType = mime.TypeByExtension(path.Ext(remote))
if !strings.ContainsRune(mimeType, '/') {
mimeType = "application/octet-stream"
}
return mimeType
}
// MimeType returns the MimeType from the object, either by calling
// the MimeTyper interface or using MimeTypeFromName
func MimeType(o ObjectInfo) (mimeType string) {
// Read the MimeType from the optional interface if available
if do, ok := o.(MimeTyper); ok {
mimeType = do.MimeType()
// Debugf(o, "Read MimeType as %q", mimeType)
if mimeType != "" {
return mimeType
}
}
return MimeTypeFromName(o.Remote())
}
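A quick sketch of the helpers above — the extension table comes from the standard mime package (plus the host's mime.types on some platforms), so exact results can vary:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	// Known extension: resolved via mime.TypeByExtension.
	fmt.Println(fs.MimeTypeFromName("holiday.mp4")) // typically "video/mp4"
	// Unknown extension: no '/' in the lookup result, so fall back.
	fmt.Println(fs.MimeTypeFromName("notes.xyz123")) // "application/octet-stream"
}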

View File

@@ -1,4 +1,5 @@
package fs
// Package object defines some useful Objects
package object
import (
"bytes"
@@ -6,12 +7,15 @@ import (
"io"
"io/ioutil"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
)
// NewStaticObjectInfo returns a static ObjectInfo
// If hashes is nil and fs is not nil, the hash map will be replaced with
// empty hashes of the types supported by the fs.
func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable bool, hashes map[HashType]string, fs Info) ObjectInfo {
func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable bool, hashes map[hash.Type]string, fs fs.Info) fs.ObjectInfo {
info := &staticObjectInfo{
remote: remote,
modTime: modTime,
@@ -22,7 +26,7 @@ func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable
}
if fs != nil && hashes == nil {
set := fs.Hashes().Array()
info.hashes = make(map[HashType]string)
info.hashes = make(map[hash.Type]string)
for _, ht := range set {
info.hashes[ht] = ""
}
@@ -35,24 +39,24 @@ type staticObjectInfo struct {
modTime time.Time
size int64
storable bool
hashes map[HashType]string
fs Info
hashes map[hash.Type]string
fs fs.Info
}
func (i *staticObjectInfo) Fs() Info { return i.fs }
func (i *staticObjectInfo) Fs() fs.Info { return i.fs }
func (i *staticObjectInfo) Remote() string { return i.remote }
func (i *staticObjectInfo) String() string { return i.remote }
func (i *staticObjectInfo) ModTime() time.Time { return i.modTime }
func (i *staticObjectInfo) Size() int64 { return i.size }
func (i *staticObjectInfo) Storable() bool { return i.storable }
func (i *staticObjectInfo) Hash(h HashType) (string, error) {
func (i *staticObjectInfo) Hash(h hash.Type) (string, error) {
if len(i.hashes) == 0 {
return "", ErrHashUnsupported
return "", hash.ErrHashUnsupported
}
if hash, ok := i.hashes[h]; ok {
return hash, nil
}
return "", ErrHashUnsupported
return "", hash.ErrHashUnsupported
}
// MemoryFs is an in memory Fs; it only supports FsInfo and Put
@@ -74,10 +78,10 @@ func (memoryFs) String() string { return "memory" }
func (memoryFs) Precision() time.Duration { return time.Nanosecond }
// Returns the supported hash types of the filesystem
func (memoryFs) Hashes() HashSet { return SupportedHashes }
func (memoryFs) Hashes() hash.Set { return hash.SupportedHashes }
// Features returns the optional features of this Fs
func (memoryFs) Features() *Features { return &Features{} }
func (memoryFs) Features() *fs.Features { return &fs.Features{} }
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
@@ -88,14 +92,14 @@ func (memoryFs) Features() *Features { return &Features{} }
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (memoryFs) List(dir string) (entries DirEntries, err error) {
func (memoryFs) List(dir string) (entries fs.DirEntries, err error) {
return nil, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (memoryFs) NewObject(remote string) (Object, error) {
return nil, ErrorObjectNotFound
func (memoryFs) NewObject(remote string) (fs.Object, error) {
return nil, fs.ErrorObjectNotFound
}
// Put uploads in to the remote path with the given modTime and size
@@ -103,7 +107,7 @@ func (memoryFs) NewObject(remote string) (Object, error) {
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (memoryFs) Put(in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) {
func (memoryFs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := NewMemoryObject(src.Remote(), src.ModTime(), nil)
return o, o.Update(in, src, options...)
}
@@ -119,10 +123,10 @@ func (memoryFs) Mkdir(dir string) error {
//
// Return an error if it doesn't exist or isn't empty
func (memoryFs) Rmdir(dir string) error {
return ErrorDirNotFound
return fs.ErrorDirNotFound
}
var _ Fs = MemoryFs
var _ fs.Fs = MemoryFs
// MemoryObject is an in memory object
type MemoryObject struct {
@@ -146,7 +150,7 @@ func (o *MemoryObject) Content() []byte {
}
// Fs returns read only access to the Fs that this object is part of
func (o *MemoryObject) Fs() Info {
func (o *MemoryObject) Fs() fs.Info {
return MemoryFs
}
@@ -176,8 +180,8 @@ func (o *MemoryObject) Storable() bool {
}
// Hash returns the requested hash of the contents
func (o *MemoryObject) Hash(h HashType) (string, error) {
hash, err := NewMultiHasherTypes(HashSet(h))
func (o *MemoryObject) Hash(h hash.Type) (string, error) {
hash, err := hash.NewMultiHasherTypes(hash.Set(h))
if err != nil {
return "", err
}
@@ -195,17 +199,17 @@ func (o *MemoryObject) SetModTime(modTime time.Time) error {
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *MemoryObject) Open(options ...OpenOption) (io.ReadCloser, error) {
func (o *MemoryObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
content := o.content
for _, option := range options {
switch x := option.(type) {
case *RangeOption:
case *fs.RangeOption:
content = o.content[x.Start:x.End]
case *SeekOption:
case *fs.SeekOption:
content = o.content[x.Offset:]
default:
if option.Mandatory() {
Logf(o, "Unsupported mandatory option: %v", option)
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
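As a sketch of the option handling above, reading a sub-range back out of a MemoryObject via the new package paths (per the case shown, RangeOption is applied here as a plain slice bound):

package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/object"
)

func main() {
	o := object.NewMemoryObject("file.txt", time.Now(), []byte("potato"))
	// Ask for bytes [0,3) - handled by the *fs.RangeOption case above.
	rc, err := o.Open(&fs.RangeOption{Start: 0, End: 3})
	if err != nil {
		panic(err)
	}
	defer func() { _ = rc.Close() }()
	b, _ := ioutil.ReadAll(rc)
	fmt.Printf("%s\n", b) // pot
}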
@@ -215,7 +219,7 @@ func (o *MemoryObject) Open(options ...OpenOption) (io.ReadCloser, error) {
// Update writes in to the object with the given modTime and size
//
// This re-uses the internal buffer if at all possible.
func (o *MemoryObject) Update(in io.Reader, src ObjectInfo, options ...OpenOption) (err error) {
func (o *MemoryObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
size := src.Size()
if size == 0 {
o.content = nil

View File

@@ -1,4 +1,4 @@
package fs_test
package object_test
import (
"bytes"
@@ -8,54 +8,51 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/object"
"github.com/stretchr/testify/assert"
)
func TestStaticObject(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
now := time.Now()
remote := "path/to/object"
size := int64(1024)
o := fs.NewStaticObjectInfo(remote, now, size, true, nil, r.Flocal)
o := object.NewStaticObjectInfo(remote, now, size, true, nil, object.MemoryFs)
assert.Equal(t, r.Flocal, o.Fs())
assert.Equal(t, object.MemoryFs, o.Fs())
assert.Equal(t, remote, o.Remote())
assert.Equal(t, remote, o.String())
assert.Equal(t, now, o.ModTime())
assert.Equal(t, size, o.Size())
assert.Equal(t, true, o.Storable())
hash, err := o.Hash(fs.HashMD5)
Hash, err := o.Hash(hash.HashMD5)
assert.NoError(t, err)
assert.Equal(t, "", hash)
assert.Equal(t, "", Hash)
o = fs.NewStaticObjectInfo(remote, now, size, true, nil, nil)
_, err = o.Hash(fs.HashMD5)
assert.Equal(t, fs.ErrHashUnsupported, err)
o = object.NewStaticObjectInfo(remote, now, size, true, nil, nil)
_, err = o.Hash(hash.HashMD5)
assert.Equal(t, hash.ErrHashUnsupported, err)
hs := map[fs.HashType]string{
fs.HashMD5: "potato",
hs := map[hash.Type]string{
hash.HashMD5: "potato",
}
o = fs.NewStaticObjectInfo(remote, now, size, true, hs, nil)
hash, err = o.Hash(fs.HashMD5)
o = object.NewStaticObjectInfo(remote, now, size, true, hs, nil)
Hash, err = o.Hash(hash.HashMD5)
assert.NoError(t, err)
assert.Equal(t, "potato", hash)
_, err = o.Hash(fs.HashSHA1)
assert.Equal(t, fs.ErrHashUnsupported, err)
assert.Equal(t, "potato", Hash)
_, err = o.Hash(hash.HashSHA1)
assert.Equal(t, hash.ErrHashUnsupported, err)
}
func TestMemoryFs(t *testing.T) {
f := fs.MemoryFs
f := object.MemoryFs
assert.Equal(t, "memory", f.Name())
assert.Equal(t, "", f.Root())
assert.Equal(t, "memory", f.String())
assert.Equal(t, time.Nanosecond, f.Precision())
assert.Equal(t, fs.SupportedHashes, f.Hashes())
assert.Equal(t, hash.SupportedHashes, f.Hashes())
assert.Equal(t, &fs.Features{}, f.Features())
entries, err := f.List("")
@@ -68,10 +65,10 @@ func TestMemoryFs(t *testing.T) {
buf := bytes.NewBufferString("potato")
now := time.Now()
src := fs.NewStaticObjectInfo("remote", now, int64(buf.Len()), true, nil, nil)
src := object.NewStaticObjectInfo("remote", now, int64(buf.Len()), true, nil, nil)
o, err = f.Put(buf, src)
assert.NoError(t, err)
hash, err := o.Hash(fs.HashSHA1)
hash, err := o.Hash(hash.HashSHA1)
assert.NoError(t, err)
assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", hash)
@@ -88,23 +85,23 @@ func TestMemoryObject(t *testing.T) {
content := []byte("potatoXXXXXXXXXXXXX")
content = content[:6] // make some extra cap
o := fs.NewMemoryObject(remote, now, content)
o := object.NewMemoryObject(remote, now, content)
assert.Equal(t, content, o.Content())
assert.Equal(t, fs.MemoryFs, o.Fs())
assert.Equal(t, object.MemoryFs, o.Fs())
assert.Equal(t, remote, o.Remote())
assert.Equal(t, remote, o.String())
assert.Equal(t, now, o.ModTime())
assert.Equal(t, int64(len(content)), o.Size())
assert.Equal(t, true, o.Storable())
hash, err := o.Hash(fs.HashMD5)
Hash, err := o.Hash(hash.HashMD5)
assert.NoError(t, err)
assert.Equal(t, "8ee2027983915ec78acc45027d874316", hash)
assert.Equal(t, "8ee2027983915ec78acc45027d874316", Hash)
hash, err = o.Hash(fs.HashSHA1)
Hash, err = o.Hash(hash.HashSHA1)
assert.NoError(t, err)
assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", hash)
assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", Hash)
newNow := now.Add(time.Minute)
err = o.SetModTime(newNow)
@@ -139,7 +136,7 @@ func TestMemoryObject(t *testing.T) {
newNow = now.Add(2 * time.Minute)
newContent := bytes.NewBufferString("Rutabaga")
assert.True(t, newContent.Len() < cap(content)) // fits within cap(content)
src := fs.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil)
src := object.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil)
err = o.Update(newContent, src)
assert.NoError(t, err)
checkContent(o, "Rutabaga")
@@ -151,7 +148,7 @@ func TestMemoryObject(t *testing.T) {
newStr = newStr + newStr + newStr + newStr + newStr + newStr + newStr + newStr + newStr + newStr
newContent = bytes.NewBufferString(newStr)
assert.True(t, newContent.Len() > cap(content)) // does not fit within cap(content)
src = fs.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil)
src = object.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil)
err = o.Update(newContent, src)
assert.NoError(t, err)
checkContent(o, newStr)
@@ -160,7 +157,7 @@ func TestMemoryObject(t *testing.T) {
// now try streaming
newStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
newContent = bytes.NewBufferString(newStr)
src = fs.NewStaticObjectInfo(remote, newNow, -1, true, nil, nil)
src = object.NewStaticObjectInfo(remote, newNow, -1, true, nil, nil)
err = o.Update(newContent, src)
assert.NoError(t, err)
checkContent(o, newStr)
@@ -168,7 +165,7 @@ func TestMemoryObject(t *testing.T) {
// and zero length
newStr = ""
newContent = bytes.NewBufferString(newStr)
src = fs.NewStaticObjectInfo(remote, newNow, 0, true, nil, nil)
src = object.NewStaticObjectInfo(remote, newNow, 0, true, nil, nil)
err = o.Update(newContent, src)
assert.NoError(t, err)
checkContent(o, newStr)

View File

@@ -0,0 +1,104 @@
package operations_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/list"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestListDirSorted is an integration test for code in fs/list/list.go
// which can't be tested there due to import loops.
func TestListDirSorted(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
filter.Active.Opt.MaxSize = 10
defer func() {
filter.Active.Opt.MaxSize = -1
}()
files := []fstest.Item{
r.WriteObject("a.txt", "hello world", t1),
r.WriteObject("zend.txt", "hello", t1),
r.WriteObject("sub dir/hello world", "hello world", t1),
r.WriteObject("sub dir/hello world2", "hello world", t1),
r.WriteObject("sub dir/ignore dir/.ignore", "", t1),
r.WriteObject("sub dir/ignore dir/should be ignored", "to ignore", t1),
r.WriteObject("sub dir/sub sub dir/hello world3", "hello world", t1),
}
fstest.CheckItems(t, r.Fremote, files...)
var items fs.DirEntries
var err error
// Turn the DirEntry into a name, ending with a / if it is a
// dir
str := func(i int) string {
item := items[i]
name := item.Remote()
switch item.(type) {
case fs.Object:
case fs.Directory:
name += "/"
default:
t.Fatalf("Unknown type %+v", item)
}
return name
}
items, err = list.DirSorted(r.Fremote, true, "")
require.NoError(t, err)
require.Len(t, items, 3)
assert.Equal(t, "a.txt", str(0))
assert.Equal(t, "sub dir/", str(1))
assert.Equal(t, "zend.txt", str(2))
items, err = list.DirSorted(r.Fremote, false, "")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/", str(0))
assert.Equal(t, "zend.txt", str(1))
items, err = list.DirSorted(r.Fremote, true, "sub dir")
require.NoError(t, err)
require.Len(t, items, 4)
assert.Equal(t, "sub dir/hello world", str(0))
assert.Equal(t, "sub dir/hello world2", str(1))
assert.Equal(t, "sub dir/ignore dir/", str(2))
assert.Equal(t, "sub dir/sub sub dir/", str(3))
items, err = list.DirSorted(r.Fremote, false, "sub dir")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/ignore dir/", str(0))
assert.Equal(t, "sub dir/sub sub dir/", str(1))
// testing ignore file
filter.Active.Opt.ExcludeFile = ".ignore"
items, err = list.DirSorted(r.Fremote, false, "sub dir")
require.NoError(t, err)
require.Len(t, items, 1)
assert.Equal(t, "sub dir/sub sub dir/", str(0))
items, err = list.DirSorted(r.Fremote, false, "sub dir/ignore dir")
require.NoError(t, err)
require.Len(t, items, 0)
items, err = list.DirSorted(r.Fremote, true, "sub dir/ignore dir")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
filter.Active.Opt.ExcludeFile = ""
items, err = list.DirSorted(r.Fremote, false, "sub dir/ignore dir")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,3 @@
// Internal tests for operations
package operations

View File

@@ -14,10 +14,10 @@
// fstest.CheckItems() before use. This makes sure the directory
// listing is now consistent and stops cascading errors.
//
// Call fs.Stats.ResetCounters() before every fs.Sync() as it uses the
// error count internally.
// Call accounting.Stats.ResetCounters() before every fs.Sync() as it
// uses the error count internally.
package fs_test
package operations_test
import (
"bytes"
@@ -32,6 +32,12 @@ import (
_ "github.com/ncw/rclone/backend/all" // import all backends
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/list"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -52,7 +58,13 @@ func TestMain(m *testing.M) {
func TestMkdir(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
fstest.TestMkdir(t, r.Fremote)
err := operations.Mkdir(r.Fremote, "")
require.NoError(t, err)
fstest.CheckListing(t, r.Fremote, []fstest.Item{})
err = operations.Mkdir(r.Fremote, "")
require.NoError(t, err)
}
func TestLsd(t *testing.T) {
@@ -63,7 +75,7 @@ func TestLsd(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)
var buf bytes.Buffer
err := fs.ListDir(r.Fremote, &buf)
err := operations.ListDir(r.Fremote, &buf)
require.NoError(t, err)
res := buf.String()
assert.Contains(t, res, "sub dir\n")
@@ -78,7 +90,7 @@ func TestLs(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1, file2)
var buf bytes.Buffer
err := fs.List(r.Fremote, &buf)
err := operations.List(r.Fremote, &buf)
require.NoError(t, err)
res := buf.String()
assert.Contains(t, res, " 0 empty space\n")
@@ -94,7 +106,7 @@ func TestLsLong(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1, file2)
var buf bytes.Buffer
err := fs.ListLong(r.Fremote, &buf)
err := operations.ListLong(r.Fremote, &buf)
require.NoError(t, err)
res := buf.String()
lines := strings.Split(strings.Trim(res, "\n"), "\n")
@@ -141,7 +153,7 @@ func TestHashSums(t *testing.T) {
// MD5 Sum
var buf bytes.Buffer
err := fs.Md5sum(r.Fremote, &buf)
err := operations.Md5sum(r.Fremote, &buf)
require.NoError(t, err)
res := buf.String()
if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") &&
@@ -158,7 +170,7 @@ func TestHashSums(t *testing.T) {
// SHA1 Sum
buf.Reset()
err = fs.Sha1sum(r.Fremote, &buf)
err = operations.Sha1sum(r.Fremote, &buf)
require.NoError(t, err)
res = buf.String()
if !strings.Contains(res, "da39a3ee5e6b4b0d3255bfef95601890afd80709 empty space\n") &&
@@ -175,7 +187,7 @@ func TestHashSums(t *testing.T) {
// Dropbox Hash Sum
buf.Reset()
err = fs.DropboxHashSum(r.Fremote, &buf)
err = operations.DropboxHashSum(r.Fremote, &buf)
require.NoError(t, err)
res = buf.String()
if !strings.Contains(res, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 empty space\n") &&
@@ -203,7 +215,7 @@ func TestCount(t *testing.T) {
fs.Config.MaxDepth = 1
defer func() { fs.Config.MaxDepth = -1 }()
objects, size, err := fs.Count(r.Fremote)
objects, size, err := operations.Count(r.Fremote)
require.NoError(t, err)
assert.Equal(t, int64(2), objects)
assert.Equal(t, int64(60), size)
@@ -217,12 +229,12 @@ func TestDelete(t *testing.T) {
file3 := r.WriteObject("large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
fstest.CheckItems(t, r.Fremote, file1, file2, file3)
fs.Config.Filter.MaxSize = 60
filter.Active.Opt.MaxSize = 60
defer func() {
fs.Config.Filter.MaxSize = -1
filter.Active.Opt.MaxSize = -1
}()
err := fs.Delete(r.Fremote)
err := operations.Delete(r.Fremote)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file3)
}
@@ -233,9 +245,9 @@ func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs) error) {
check := func(i int, wantErrors int64) {
fs.Debugf(r.Fremote, "%d: Starting check test", i)
oldErrors := fs.Stats.GetErrors()
oldErrors := accounting.Stats.GetErrors()
err := checkFunction(r.Flocal, r.Fremote)
gotErrors := fs.Stats.GetErrors() - oldErrors
gotErrors := accounting.Stats.GetErrors() - oldErrors
if wantErrors == 0 && err != nil {
t.Errorf("%d: Got error when not expecting one: %v", i, err)
}
@@ -276,11 +288,11 @@ func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs) error) {
}
func TestCheck(t *testing.T) {
testCheck(t, fs.Check)
testCheck(t, operations.Check)
}
func TestCheckDownload(t *testing.T) {
testCheck(t, fs.CheckDownload)
testCheck(t, operations.CheckDownload)
}
func TestCheckSizeOnly(t *testing.T) {
@@ -296,7 +308,7 @@ func skipIfCantDedupe(t *testing.T, f fs.Fs) {
if !f.Features().DuplicateFiles {
t.Skip("Can't test deduplicate - no duplicate files possible")
}
if !f.Hashes().Contains(fs.HashMD5) {
if !f.Hashes().Contains(hash.HashMD5) {
t.Skip("Can't test deduplicate - MD5 not supported")
}
}
@@ -311,7 +323,7 @@ func TestDeduplicateInteractive(t *testing.T) {
file3 := r.WriteUncheckedObject("one", "This is one", t1)
r.CheckWithDuplicates(t, file1, file2, file3)
err := fs.Deduplicate(r.Fremote, fs.DeduplicateInteractive)
err := operations.Deduplicate(r.Fremote, operations.DeduplicateInteractive)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file1)
@@ -327,7 +339,7 @@ func TestDeduplicateSkip(t *testing.T) {
file3 := r.WriteUncheckedObject("one", "This is another one", t1)
r.CheckWithDuplicates(t, file1, file2, file3)
err := fs.Deduplicate(r.Fremote, fs.DeduplicateSkip)
err := operations.Deduplicate(r.Fremote, operations.DeduplicateSkip)
require.NoError(t, err)
r.CheckWithDuplicates(t, file1, file3)
@@ -343,10 +355,10 @@ func TestDeduplicateFirst(t *testing.T) {
file3 := r.WriteUncheckedObject("one", "This is one BB", t1)
r.CheckWithDuplicates(t, file1, file2, file3)
err := fs.Deduplicate(r.Fremote, fs.DeduplicateFirst)
err := operations.Deduplicate(r.Fremote, operations.DeduplicateFirst)
require.NoError(t, err)
objects, size, err := fs.Count(r.Fremote)
objects, size, err := operations.Count(r.Fremote)
require.NoError(t, err)
assert.Equal(t, int64(1), objects)
if size != file1.Size && size != file2.Size && size != file3.Size {
@@ -364,7 +376,7 @@ func TestDeduplicateNewest(t *testing.T) {
file3 := r.WriteUncheckedObject("one", "This is another one", t3)
r.CheckWithDuplicates(t, file1, file2, file3)
err := fs.Deduplicate(r.Fremote, fs.DeduplicateNewest)
err := operations.Deduplicate(r.Fremote, operations.DeduplicateNewest)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file3)
@@ -380,7 +392,7 @@ func TestDeduplicateOldest(t *testing.T) {
file3 := r.WriteUncheckedObject("one", "This is another one", t3)
r.CheckWithDuplicates(t, file1, file2, file3)
err := fs.Deduplicate(r.Fremote, fs.DeduplicateOldest)
err := operations.Deduplicate(r.Fremote, operations.DeduplicateOldest)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file1)
@@ -396,10 +408,10 @@ func TestDeduplicateRename(t *testing.T) {
file3 := r.WriteUncheckedObject("one.txt", "This is another one", t3)
r.CheckWithDuplicates(t, file1, file2, file3)
err := fs.Deduplicate(r.Fremote, fs.DeduplicateRename)
err := operations.Deduplicate(r.Fremote, operations.DeduplicateRename)
require.NoError(t, err)
require.NoError(t, fs.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
require.NoError(t, walk.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
@@ -434,7 +446,7 @@ func TestMergeDirs(t *testing.T) {
file2 := r.WriteObject("dupe2/two.txt", "This is one too", t2)
file3 := r.WriteObject("dupe3/three.txt", "This is another one", t3)
objs, dirs, err := fs.WalkGetAll(r.Fremote, "", true, 1)
objs, dirs, err := walk.GetAll(r.Fremote, "", true, 1)
require.NoError(t, err)
assert.Equal(t, 3, len(dirs))
assert.Equal(t, 0, len(objs))
@@ -446,7 +458,7 @@ func TestMergeDirs(t *testing.T) {
file3.Path = "dupe1/three.txt"
fstest.CheckItems(t, r.Fremote, file1, file2, file3)
objs, dirs, err = fs.WalkGetAll(r.Fremote, "", true, 1)
objs, dirs, err = walk.GetAll(r.Fremote, "", true, 1)
require.NoError(t, err)
assert.Equal(t, 1, len(dirs))
assert.Equal(t, 0, len(objs))
@@ -473,7 +485,7 @@ func TestCat(t *testing.T) {
{1, 3, "BCD", "123"},
} {
var buf bytes.Buffer
err := fs.Cat(r.Fremote, &buf, test.offset, test.count)
err := operations.Cat(r.Fremote, &buf, test.offset, test.count)
require.NoError(t, err)
res := buf.String()
@@ -506,11 +518,11 @@ func TestRcat(t *testing.T) {
path2 := prefix + "big_file_from_pipe"
in := ioutil.NopCloser(strings.NewReader(data1))
_, err := fs.Rcat(r.Fremote, path1, in, t1)
_, err := operations.Rcat(r.Fremote, path1, in, t1)
require.NoError(t, err)
in = ioutil.NopCloser(strings.NewReader(data2))
_, err = fs.Rcat(r.Fremote, path2, in, t2)
_, err = operations.Rcat(r.Fremote, path2, in, t2)
require.NoError(t, err)
file1 := fstest.NewItem(path1, data1, t1)
@@ -531,13 +543,13 @@ func TestRmdirsNoLeaveRoot(t *testing.T) {
r.ForceMkdir(r.Fremote)
file1 := r.WriteObject("A1/B1/C1/one", "aaa", t1)
//..and dirs we expect to delete
require.NoError(t, fs.Mkdir(r.Fremote, "A2"))
require.NoError(t, fs.Mkdir(r.Fremote, "A1/B2"))
require.NoError(t, fs.Mkdir(r.Fremote, "A1/B2/C2"))
require.NoError(t, fs.Mkdir(r.Fremote, "A1/B1/C3"))
require.NoError(t, fs.Mkdir(r.Fremote, "A3"))
require.NoError(t, fs.Mkdir(r.Fremote, "A3/B3"))
require.NoError(t, fs.Mkdir(r.Fremote, "A3/B3/C4"))
require.NoError(t, operations.Mkdir(r.Fremote, "A2"))
require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2"))
require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2/C2"))
require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1/C3"))
require.NoError(t, operations.Mkdir(r.Fremote, "A3"))
require.NoError(t, operations.Mkdir(r.Fremote, "A3/B3"))
require.NoError(t, operations.Mkdir(r.Fremote, "A3/B3/C4"))
//..and one more file at the end
file2 := r.WriteObject("A1/two", "bbb", t2)
@@ -562,7 +574,7 @@ func TestRmdirsNoLeaveRoot(t *testing.T) {
fs.Config.ModifyWindow,
)
require.NoError(t, fs.Rmdirs(r.Fremote, "", false))
require.NoError(t, operations.Rmdirs(r.Fremote, "", false))
fstest.CheckListingWithPrecision(
t,
@@ -587,9 +599,9 @@ func TestRmdirsLeaveRoot(t *testing.T) {
r.ForceMkdir(r.Fremote)
require.NoError(t, fs.Mkdir(r.Fremote, "A1"))
require.NoError(t, fs.Mkdir(r.Fremote, "A1/B1"))
require.NoError(t, fs.Mkdir(r.Fremote, "A1/B1/C1"))
require.NoError(t, operations.Mkdir(r.Fremote, "A1"))
require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1"))
require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1/C1"))
fstest.CheckListingWithPrecision(
t,
@@ -603,7 +615,7 @@ func TestRmdirsLeaveRoot(t *testing.T) {
fs.Config.ModifyWindow,
)
require.NoError(t, fs.Rmdirs(r.Fremote, "A1", true))
require.NoError(t, operations.Rmdirs(r.Fremote, "A1", true))
fstest.CheckListingWithPrecision(
t,
@@ -626,7 +638,7 @@ func TestMoveFile(t *testing.T) {
file2 := file1
file2.Path = "sub/file2"
err := fs.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
err := operations.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal)
fstest.CheckItems(t, r.Fremote, file2)
@@ -634,12 +646,12 @@ func TestMoveFile(t *testing.T) {
r.WriteFile("file1", "file1 contents", t1)
fstest.CheckItems(t, r.Flocal, file1)
err = fs.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
err = operations.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal)
fstest.CheckItems(t, r.Fremote, file2)
err = fs.MoveFile(r.Fremote, r.Fremote, file2.Path, file2.Path)
err = operations.MoveFile(r.Fremote, r.Fremote, file2.Path, file2.Path)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal)
fstest.CheckItems(t, r.Fremote, file2)
@@ -655,17 +667,17 @@ func TestCopyFile(t *testing.T) {
file2 := file1
file2.Path = "sub/file2"
err := fs.CopyFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
err := operations.CopyFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file2)
err = fs.CopyFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
err = operations.CopyFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file2)
err = fs.CopyFile(r.Fremote, r.Fremote, file2.Path, file2.Path)
err = operations.CopyFile(r.Fremote, r.Fremote, file2.Path, file2.Path)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file2)
@@ -677,7 +689,7 @@ type testFsInfo struct {
root string
stringVal string
precision time.Duration
hashes fs.HashSet
hashes hash.Set
features fs.Features
}
@@ -694,7 +706,7 @@ func (i *testFsInfo) String() string { return i.stringVal }
func (i *testFsInfo) Precision() time.Duration { return i.precision }
// Returns the supported hash types of the filesystem
func (i *testFsInfo) Hashes() fs.HashSet { return i.hashes }
func (i *testFsInfo) Hashes() hash.Set { return i.hashes }
// Returns the supported hash types of the filesystem
func (i *testFsInfo) Features() *fs.Features { return &i.features }
@@ -712,9 +724,9 @@ func TestSameConfig(t *testing.T) {
{"namey", "roott", false},
} {
b := &testFsInfo{name: test.name, root: test.root}
actual := fs.SameConfig(a, b)
actual := operations.SameConfig(a, b)
assert.Equal(t, test.expected, actual)
actual = fs.SameConfig(b, a)
actual = operations.SameConfig(b, a)
assert.Equal(t, test.expected, actual)
}
}
@@ -732,9 +744,9 @@ func TestSame(t *testing.T) {
{"namey", "roott", false},
} {
b := &testFsInfo{name: test.name, root: test.root}
actual := fs.Same(a, b)
actual := operations.Same(a, b)
assert.Equal(t, test.expected, actual)
actual = fs.Same(b, a)
actual = operations.Same(b, a)
assert.Equal(t, test.expected, actual)
}
}
@@ -758,137 +770,13 @@ func TestOverlapping(t *testing.T) {
} {
b := &testFsInfo{name: test.name, root: test.root}
what := fmt.Sprintf("(%q,%q) vs (%q,%q)", a.name, a.root, b.name, b.root)
actual := fs.Overlapping(a, b)
actual := operations.Overlapping(a, b)
assert.Equal(t, test.expected, actual, what)
actual = fs.Overlapping(b, a)
actual = operations.Overlapping(b, a)
assert.Equal(t, test.expected, actual, what)
}
}
func TestListDirSorted(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
fs.Config.Filter.MaxSize = 10
defer func() {
fs.Config.Filter.MaxSize = -1
}()
files := []fstest.Item{
r.WriteObject("a.txt", "hello world", t1),
r.WriteObject("zend.txt", "hello", t1),
r.WriteObject("sub dir/hello world", "hello world", t1),
r.WriteObject("sub dir/hello world2", "hello world", t1),
r.WriteObject("sub dir/ignore dir/.ignore", "", t1),
r.WriteObject("sub dir/ignore dir/should be ignored", "to ignore", t1),
r.WriteObject("sub dir/sub sub dir/hello world3", "hello world", t1),
}
fstest.CheckItems(t, r.Fremote, files...)
var items fs.DirEntries
var err error
// Turn the DirEntry into a name, ending with a / if it is a
// dir
str := func(i int) string {
item := items[i]
name := item.Remote()
switch item.(type) {
case fs.Object:
case fs.Directory:
name += "/"
default:
t.Fatalf("Unknown type %+v", item)
}
return name
}
items, err = fs.ListDirSorted(r.Fremote, true, "")
require.NoError(t, err)
require.Len(t, items, 3)
assert.Equal(t, "a.txt", str(0))
assert.Equal(t, "sub dir/", str(1))
assert.Equal(t, "zend.txt", str(2))
items, err = fs.ListDirSorted(r.Fremote, false, "")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/", str(0))
assert.Equal(t, "zend.txt", str(1))
items, err = fs.ListDirSorted(r.Fremote, true, "sub dir")
require.NoError(t, err)
require.Len(t, items, 4)
assert.Equal(t, "sub dir/hello world", str(0))
assert.Equal(t, "sub dir/hello world2", str(1))
assert.Equal(t, "sub dir/ignore dir/", str(2))
assert.Equal(t, "sub dir/sub sub dir/", str(3))
items, err = fs.ListDirSorted(r.Fremote, false, "sub dir")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/ignore dir/", str(0))
assert.Equal(t, "sub dir/sub sub dir/", str(1))
// testing ignore file
fs.Config.Filter.ExcludeFile = ".ignore"
items, err = fs.ListDirSorted(r.Fremote, false, "sub dir")
require.NoError(t, err)
require.Len(t, items, 1)
assert.Equal(t, "sub dir/sub sub dir/", str(0))
items, err = fs.ListDirSorted(r.Fremote, false, "sub dir/ignore dir")
require.NoError(t, err)
require.Len(t, items, 0)
items, err = fs.ListDirSorted(r.Fremote, true, "sub dir/ignore dir")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
fs.Config.Filter.ExcludeFile = ""
items, err = fs.ListDirSorted(r.Fremote, false, "sub dir/ignore dir")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
}
type byteReader struct {
c byte
}
func (br *byteReader) Read(p []byte) (n int, err error) {
if br.c == 0 {
err = io.EOF
} else if len(p) >= 1 {
p[0] = br.c
n = 1
br.c--
}
return
}
func TestReadFill(t *testing.T) {
buf := []byte{9, 9, 9, 9, 9}
n, err := fs.ReadFill(&byteReader{0}, buf)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, n)
assert.Equal(t, []byte{9, 9, 9, 9, 9}, buf)
n, err = fs.ReadFill(&byteReader{3}, buf)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 3, n)
assert.Equal(t, []byte{3, 2, 1, 9, 9}, buf)
n, err = fs.ReadFill(&byteReader{8}, buf)
assert.Equal(t, nil, err)
assert.Equal(t, 5, n)
assert.Equal(t, []byte{8, 7, 6, 5, 4}, buf)
}
type errorReader struct {
err error
}
@@ -903,19 +791,19 @@ func TestCheckEqualReaders(t *testing.T) {
b65b[len(b65b)-1] = 1
b66 := make([]byte, 66*1024)
differ, err := fs.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65a))
differ, err := operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65a))
assert.NoError(t, err)
assert.Equal(t, differ, false)
differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65b))
differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65b))
assert.NoError(t, err)
assert.Equal(t, differ, true)
differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b66))
differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b66))
assert.NoError(t, err)
assert.Equal(t, differ, true)
differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b66), bytes.NewBuffer(b65a))
differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b66), bytes.NewBuffer(b65a))
assert.NoError(t, err)
assert.Equal(t, differ, true)
@@ -926,35 +814,35 @@ func TestCheckEqualReaders(t *testing.T) {
return io.MultiReader(r, e)
}
differ, err = fs.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65a))
differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65a))
assert.Equal(t, myErr, err)
assert.Equal(t, differ, true)
differ, err = fs.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65b))
differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65b))
assert.Equal(t, myErr, err)
assert.Equal(t, differ, true)
differ, err = fs.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b66))
differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b66))
assert.Equal(t, myErr, err)
assert.Equal(t, differ, true)
differ, err = fs.CheckEqualReaders(wrap(b66), bytes.NewBuffer(b65a))
differ, err = operations.CheckEqualReaders(wrap(b66), bytes.NewBuffer(b65a))
assert.Equal(t, myErr, err)
assert.Equal(t, differ, true)
differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65a))
differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65a))
assert.Equal(t, myErr, err)
assert.Equal(t, differ, true)
differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65b))
differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65b))
assert.Equal(t, myErr, err)
assert.Equal(t, differ, true)
differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b66))
differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b66))
assert.Equal(t, myErr, err)
assert.Equal(t, differ, true)
differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b66), wrap(b65a))
differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b66), wrap(b65a))
assert.Equal(t, myErr, err)
assert.Equal(t, differ, true)
}
@@ -967,50 +855,50 @@ func TestListFormat(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1, file2)
items, _ := fs.ListDirSorted(r.Fremote, true, "")
var list fs.ListFormat
items, _ := list.DirSorted(r.Fremote, true, "")
var list operations.ListFormat
list.AddPath()
list.SetDirSlash(false)
assert.Equal(t, "subdir", fs.ListFormatted(&items[1], &list))
assert.Equal(t, "subdir", operations.ListFormatted(&items[1], &list))
list.SetDirSlash(true)
assert.Equal(t, "subdir/", fs.ListFormatted(&items[1], &list))
assert.Equal(t, "subdir/", operations.ListFormatted(&items[1], &list))
list.SetOutput(nil)
assert.Equal(t, "", fs.ListFormatted(&items[1], &list))
assert.Equal(t, "", operations.ListFormatted(&items[1], &list))
list.AppendOutput(func() string { return "a" })
list.AppendOutput(func() string { return "b" })
assert.Equal(t, "ab", fs.ListFormatted(&items[1], &list))
assert.Equal(t, "ab", operations.ListFormatted(&items[1], &list))
list.SetSeparator(":::")
assert.Equal(t, "a:::b", fs.ListFormatted(&items[1], &list))
assert.Equal(t, "a:::b", operations.ListFormatted(&items[1], &list))
list.SetOutput(nil)
list.AddModTime()
assert.Equal(t, items[0].ModTime().Format("2006-01-02 15:04:05"), fs.ListFormatted(&items[0], &list))
assert.Equal(t, items[0].ModTime().Format("2006-01-02 15:04:05"), operations.ListFormatted(&items[0], &list))
list.SetOutput(nil)
list.AddSize()
assert.Equal(t, "1", fs.ListFormatted(&items[0], &list))
assert.Equal(t, "1", operations.ListFormatted(&items[0], &list))
list.AddPath()
list.AddModTime()
list.SetDirSlash(true)
list.SetSeparator("__SEP__")
assert.Equal(t, "1__SEP__a__SEP__"+items[0].ModTime().Format("2006-01-02 15:04:05"), fs.ListFormatted(&items[0], &list))
assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"__SEP__subdir/__SEP__"+items[1].ModTime().Format("2006-01-02 15:04:05"), fs.ListFormatted(&items[1], &list))
assert.Equal(t, "1__SEP__a__SEP__"+items[0].ModTime().Format("2006-01-02 15:04:05"), operations.ListFormatted(&items[0], &list))
assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"__SEP__subdir/__SEP__"+items[1].ModTime().Format("2006-01-02 15:04:05"), operations.ListFormatted(&items[1], &list))
for _, test := range []struct {
ht fs.HashType
ht hash.Type
want string
}{
{fs.HashMD5, "0cc175b9c0f1b6a831c399e269772661"},
{fs.HashSHA1, "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"},
{fs.HashDropbox, "bf5d3affb73efd2ec6c36ad3112dd933efed63c4e1cbffcfa88e2759c144f2d8"},
{hash.HashMD5, "0cc175b9c0f1b6a831c399e269772661"},
{hash.HashSHA1, "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"},
{hash.HashDropbox, "bf5d3affb73efd2ec6c36ad3112dd933efed63c4e1cbffcfa88e2759c144f2d8"},
} {
list.SetOutput(nil)
list.AddHash(test.ht)
got := fs.ListFormatted(&items[0], &list)
got := operations.ListFormatted(&items[0], &list)
if got != "UNSUPPORTED" && got != "" {
assert.Equal(t, test.want, got)
}

View File

@@ -1,92 +0,0 @@
// Internal tests for operations
package fs
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFilterAndSortIncludeAll(t *testing.T) {
da := newDir("a")
oA := mockObject("A")
db := newDir("b")
oB := mockObject("B")
dc := newDir("c")
oC := mockObject("C")
dd := newDir("d")
oD := mockObject("D")
entries := DirEntries{da, oA, db, oB, dc, oC, dd, oD}
includeObject := func(o Object) bool {
return o != oB
}
includeDirectory := func(remote string) (bool, error) {
return remote != "c", nil
}
// no filter
newEntries, err := filterAndSortDir(entries, true, "", includeObject, includeDirectory)
require.NoError(t, err)
assert.Equal(t,
newEntries,
DirEntries{oA, oB, oC, oD, da, db, dc, dd},
)
// filter
newEntries, err = filterAndSortDir(entries, false, "", includeObject, includeDirectory)
require.NoError(t, err)
assert.Equal(t,
newEntries,
DirEntries{oA, oC, oD, da, db, dd},
)
}
func TestFilterAndSortCheckDir(t *testing.T) {
// Check the different kinds of error when listing "dir"
da := newDir("dir/")
oA := mockObject("diR/a")
db := newDir("dir/b")
oB := mockObject("dir/B/sub")
dc := newDir("dir/c")
oC := mockObject("dir/C")
dd := newDir("dir/d")
oD := mockObject("dir/D")
entries := DirEntries{da, oA, db, oB, dc, oC, dd, oD}
newEntries, err := filterAndSortDir(entries, true, "dir", nil, nil)
require.NoError(t, err)
assert.Equal(t,
newEntries,
DirEntries{oC, oD, db, dc, dd},
)
}
func TestFilterAndSortCheckDirRoot(t *testing.T) {
// Check the different kinds of error when listing the root ""
da := newDir("")
oA := mockObject("A")
db := newDir("b")
oB := mockObject("B/sub")
dc := newDir("c")
oC := mockObject("C")
dd := newDir("d")
oD := mockObject("D")
entries := DirEntries{da, oA, db, oB, dc, oC, dd, oD}
newEntries, err := filterAndSortDir(entries, true, "", nil, nil)
require.NoError(t, err)
assert.Equal(t,
newEntries,
DirEntries{oA, oC, oD, db, dc, dd},
)
}
func TestFilterAndSortUnknown(t *testing.T) {
// Check that an unknown entry produces an error
da := newDir("")
oA := mockObject("A")
ub := unknownDirEntry("b")
oB := mockObject("B/sub")
entries := DirEntries{da, oA, ub, oB}
newEntries, err := filterAndSortDir(entries, true, "", nil, nil)
assert.Error(t, err, "error")
assert.Nil(t, newEntries)
}


@@ -6,6 +6,8 @@ import (
"fmt"
"net/http"
"strconv"
"github.com/ncw/rclone/fs/hash"
)
// OpenOption is an interface describing options for Open
@@ -97,7 +99,7 @@ func (o *HTTPOption) Mandatory() bool {
// HashesOption defines an option used to tell the local fs to limit
// the number of hashes it calculates.
type HashesOption struct {
Hashes HashSet
Hashes hash.Set
}
// Header formats the option as an http header
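A small usage sketch for the option with the relocated hash package; hash.NewHashSet and Set.Contains are assumed to mirror the pre-split fs.NewHashSet and HashSet.Contains:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/hash"
)

func main() {
	// Restrict hash calculation to MD5 only (constructor assumed).
	opt := fs.HashesOption{Hashes: hash.NewHashSet(hash.HashMD5)}
	fmt.Println(opt.Hashes.Contains(hash.HashMD5)) // true
}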

fs/parseduration.go Normal file

@@ -0,0 +1,68 @@
package fs
import (
"strconv"
"strings"
"time"
)
// Duration is a time.Duration with some more parsing options
type Duration time.Duration
// Turn Duration into a string
func (d Duration) String() string {
return time.Duration(d).String()
}
// Suffixes understood by ParseDuration, following common time conventions
var ageSuffixes = []struct {
Suffix string
Multiplier time.Duration
}{
{Suffix: "ms", Multiplier: time.Millisecond},
{Suffix: "s", Multiplier: time.Second},
{Suffix: "m", Multiplier: time.Minute},
{Suffix: "h", Multiplier: time.Hour},
{Suffix: "d", Multiplier: time.Hour * 24},
{Suffix: "w", Multiplier: time.Hour * 24 * 7},
{Suffix: "M", Multiplier: time.Hour * 24 * 30},
{Suffix: "y", Multiplier: time.Hour * 24 * 365},
// Default to second
{Suffix: "", Multiplier: time.Second},
}
// ParseDuration parses a duration string. Accepts ms|s|m|h|d|w|M|y suffixes. Defaults to seconds if no suffix is given
func ParseDuration(age string) (time.Duration, error) {
var period float64
for _, ageSuffix := range ageSuffixes {
if strings.HasSuffix(age, ageSuffix.Suffix) {
numberString := age[:len(age)-len(ageSuffix.Suffix)]
var err error
period, err = strconv.ParseFloat(numberString, 64)
if err != nil {
return time.Duration(0), err
}
period *= float64(ageSuffix.Multiplier)
break
}
}
return time.Duration(period), nil
}
// Set a Duration
func (d *Duration) Set(s string) error {
duration, err := ParseDuration(s)
if err != nil {
return err
}
*d = Duration(duration)
return nil
}
// Type of the value
func (d Duration) Type() string {
return "time.Duration"
}
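As a quick check of the rules above (a bare number defaults to seconds, and fractional values scale by the suffix multiplier), a minimal example:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	d, _ := fs.ParseDuration("1.5d") // 1.5 * 24h
	fmt.Println(d)                   // 36h0m0s
	d, _ = fs.ParseDuration("90")    // no suffix, defaults to seconds
	fmt.Println(d)                   // 1m30s
}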

fs/parseduration_test.go Normal file

@@ -0,0 +1,44 @@
package fs
import (
"testing"
"time"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interface
var _ pflag.Value = (*Duration)(nil)
func TestParseDuration(t *testing.T) {
for _, test := range []struct {
in string
want float64
err bool
}{
{"0", 0, false},
{"", 0, true},
{"1ms", float64(time.Millisecond), false},
{"1s", float64(time.Second), false},
{"1m", float64(time.Minute), false},
{"1h", float64(time.Hour), false},
{"1d", float64(time.Hour) * 24, false},
{"1w", float64(time.Hour) * 24 * 7, false},
{"1M", float64(time.Hour) * 24 * 30, false},
{"1y", float64(time.Hour) * 24 * 365, false},
{"1.5y", float64(time.Hour) * 24 * 365 * 1.5, false},
{"-1s", -float64(time.Second), false},
{"1.s", float64(time.Second), false},
{"1x", 0, true},
} {
duration, err := ParseDuration(test.in)
if test.err {
require.Error(t, err)
} else {
require.NoError(t, err)
}
assert.Equal(t, test.want, float64(duration))
}
}


@@ -1,96 +0,0 @@
package fs
import (
"io"
"github.com/pkg/errors"
)
// A RepeatableReader implements io.ReadSeeker. It allows seeking cached data
// back and forth within the reader, but will only read data from the internal Reader as necessary,
// and plays nicely with Account and io.LimitedReader to reflect the current speed
type RepeatableReader struct {
in io.Reader // Input reader
i int64 // current reading index
b []byte // internal cache buffer
}
var _ io.ReadSeeker = (*RepeatableReader)(nil)
// Seek implements the io.Seeker interface.
// If the seek position is past the cached buffer length the function will return
// the maximum offset that can be used and an "fs.RepeatableReader.Seek: offset is unavailable" error
func (r *RepeatableReader) Seek(offset int64, whence int) (int64, error) {
var abs int64
cacheLen := int64(len(r.b))
switch whence {
case 0: //io.SeekStart
abs = offset
case 1: //io.SeekCurrent
abs = r.i + offset
case 2: //io.SeekEnd
abs = cacheLen + offset
default:
return 0, errors.New("fs.RepeatableReader.Seek: invalid whence")
}
if abs < 0 {
return 0, errors.New("fs.RepeatableReader.Seek: negative position")
}
if abs > cacheLen {
return offset - (abs - cacheLen), errors.New("fs.RepeatableReader.Seek: offset is unavailable")
}
r.i = abs
return abs, nil
}
// Read data from the original Reader into bytes
// Data is either served from the underlying Reader or from the cache if it was already read
func (r *RepeatableReader) Read(b []byte) (n int, err error) {
cacheLen := int64(len(r.b))
if r.i == cacheLen {
n, err = r.in.Read(b)
if n > 0 {
r.b = append(r.b, b[:n]...)
}
} else {
n = copy(b, r.b[r.i:])
}
r.i += int64(n)
return n, err
}
// NewRepeatableReader creates a new repeatable reader from Reader r
func NewRepeatableReader(r io.Reader) *RepeatableReader {
return &RepeatableReader{in: r}
}
// NewRepeatableReaderSized creates a new repeatable reader from Reader r
// with an initial buffer of size.
func NewRepeatableReaderSized(r io.Reader, size int) *RepeatableReader {
return &RepeatableReader{
in: r,
b: make([]byte, 0, size),
}
}
// NewRepeatableLimitReader creates a new repeatable reader from Reader r
// with an initial buffer of size, wrapped in an io.LimitReader to read
// only size.
func NewRepeatableLimitReader(r io.Reader, size int) *RepeatableReader {
return NewRepeatableReaderSized(io.LimitReader(r, int64(size)), size)
}
// NewRepeatableReaderBuffer creates a new repeatable reader from Reader r
// using the buffer passed in.
func NewRepeatableReaderBuffer(r io.Reader, buf []byte) *RepeatableReader {
return &RepeatableReader{
in: r,
b: buf[:0],
}
}
// NewRepeatableLimitReaderBuffer creates a new repeatable reader from
// Reader r and buf, wrapped in an io.LimitReader to read only size.
func NewRepeatableLimitReaderBuffer(r io.Reader, buf []byte, size int64) *RepeatableReader {
return NewRepeatableReaderBuffer(io.LimitReader(r, int64(size)), buf)
}
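For reference, a minimal sketch of using this reader as exported before this commit removed it from fs: read once from the source, then seek back and replay from the cache (numeric whence values match the go1.6-friendly style above).

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/ncw/rclone/fs"
)

func main() {
	r := fs.NewRepeatableReader(strings.NewReader("hello world"))
	buf := make([]byte, 5)
	_, _ = io.ReadFull(r, buf) // reads "hello" from the source and caches it
	_, _ = r.Seek(0, 0)        // whence 0 (io.SeekStart): rewind within the cache
	_, _ = io.ReadFull(r, buf) // replays "hello" from the cache, no source read
	fmt.Println(string(buf))   // hello
}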


@@ -1,100 +0,0 @@
package fs
import (
"bytes"
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestRepeatableReader(t *testing.T) {
var dst []byte
var n int
var pos int64
var err error
b := []byte("Testbuffer")
buf := bytes.NewBuffer(b)
r := NewRepeatableReader(buf)
dst = make([]byte, 100)
n, err = r.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 10, n)
require.Equal(t, b, dst[0:10])
// Test read EOF
n, err = r.Read(dst)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, n)
// Test Seek Back to start
dst = make([]byte, 10)
pos, err = r.Seek(0, 0)
assert.Nil(t, err)
require.Equal(t, 0, int(pos))
n, err = r.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 10, n)
require.Equal(t, b, dst)
// Test partial read
buf = bytes.NewBuffer(b)
r = NewRepeatableReader(buf)
dst = make([]byte, 5)
n, err = r.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 5, n)
require.Equal(t, b[0:5], dst)
n, err = r.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 5, n)
require.Equal(t, b[5:], dst)
// Test Seek
buf = bytes.NewBuffer(b)
r = NewRepeatableReader(buf)
// Should not allow seek past cache index
pos, err = r.Seek(5, 1)
assert.NotNil(t, err)
assert.Equal(t, "fs.RepeatableReader.Seek: offset is unavailable", err.Error())
assert.Equal(t, 0, int(pos))
// Should not allow seek to a negative position
pos, err = r.Seek(-1, 1)
assert.NotNil(t, err)
assert.Equal(t, "fs.RepeatableReader.Seek: negative position", err.Error())
assert.Equal(t, 0, int(pos))
// Should not allow seek with invalid whence
pos, err = r.Seek(0, 3)
assert.NotNil(t, err)
assert.Equal(t, "fs.RepeatableReader.Seek: invalid whence", err.Error())
assert.Equal(t, 0, int(pos))
// Should seek from index with io.SeekCurrent(1) whence
dst = make([]byte, 5)
_, _ = r.Read(dst)
pos, err = r.Seek(-3, 1)
assert.Nil(t, err)
require.Equal(t, 2, int(pos))
pos, err = r.Seek(1, 1)
assert.Nil(t, err)
require.Equal(t, 3, int(pos))
// Should seek from cache end with io.SeekEnd(2) whence
pos, err = r.Seek(-3, 2)
assert.Nil(t, err)
require.Equal(t, 2, int(pos))
// Should read from the seek position and past it
dst = make([]byte, 5)
n, err = io.ReadFull(r, dst)
assert.Nil(t, err)
assert.Equal(t, 5, n)
assert.Equal(t, b[2:7], dst)
}

fs/sizesuffix.go Normal file

@@ -0,0 +1,102 @@
package fs
// SizeSuffix is parsed by flag with k/M/G suffixes
import (
"fmt"
"math"
"strconv"
"strings"
"github.com/pkg/errors"
)
// SizeSuffix is an int64 with a friendly way of printing and setting
type SizeSuffix int64
// Turn SizeSuffix into a string and a suffix
func (x SizeSuffix) string() (string, string) {
scaled := float64(0)
suffix := ""
switch {
case x < 0:
return "off", ""
case x == 0:
return "0", ""
case x < 1024:
scaled = float64(x)
suffix = ""
case x < 1024*1024:
scaled = float64(x) / 1024
suffix = "k"
case x < 1024*1024*1024:
scaled = float64(x) / 1024 / 1024
suffix = "M"
default:
scaled = float64(x) / 1024 / 1024 / 1024
suffix = "G"
}
if math.Floor(scaled) == scaled {
return fmt.Sprintf("%.0f", scaled), suffix
}
return fmt.Sprintf("%.3f", scaled), suffix
}
// String turns SizeSuffix into a string
func (x SizeSuffix) String() string {
val, suffix := x.string()
return val + suffix
}
// Unit turns SizeSuffix into a string with a unit
func (x SizeSuffix) Unit(unit string) string {
val, suffix := x.string()
if val == "off" {
return val
}
return val + " " + suffix + unit
}
// Set a SizeSuffix
func (x *SizeSuffix) Set(s string) error {
if len(s) == 0 {
return errors.New("empty string")
}
if strings.ToLower(s) == "off" {
*x = -1
return nil
}
suffix := s[len(s)-1]
suffixLen := 1
var multiplier float64
switch suffix {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
suffixLen = 0
multiplier = 1 << 10
case 'b', 'B':
multiplier = 1
case 'k', 'K':
multiplier = 1 << 10
case 'm', 'M':
multiplier = 1 << 20
case 'g', 'G':
multiplier = 1 << 30
default:
return errors.Errorf("bad suffix %q", suffix)
}
s = s[:len(s)-suffixLen]
value, err := strconv.ParseFloat(s, 64)
if err != nil {
return err
}
if value < 0 {
return errors.Errorf("size can't be negative %q", s)
}
value *= multiplier
*x = SizeSuffix(value)
return nil
}
// Type of the value
func (x *SizeSuffix) Type() string {
return "int64"
}
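A short round-trip through Set, String and Unit; note that a bare number defaults to kBytes and "off" is stored as -1:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	var bw fs.SizeSuffix
	_ = bw.Set("10M")
	fmt.Println(bw, bw.Unit("Bytes")) // 10M 10 MBytes
	_ = bw.Set("0.5")      // bare number, defaults to kBytes
	fmt.Println(int64(bw)) // 512
	_ = bw.Set("off")      // stored as -1
	fmt.Println(bw)        // off
}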

fs/sizesuffix_test.go Normal file

@@ -0,0 +1,90 @@
package fs
import (
"testing"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interface
var _ pflag.Value = (*SizeSuffix)(nil)
func TestSizeSuffixString(t *testing.T) {
for _, test := range []struct {
in float64
want string
}{
{0, "0"},
{102, "102"},
{1024, "1k"},
{1024 * 1024, "1M"},
{1024 * 1024 * 1024, "1G"},
{10 * 1024 * 1024 * 1024, "10G"},
{10.1 * 1024 * 1024 * 1024, "10.100G"},
{-1, "off"},
{-100, "off"},
} {
ss := SizeSuffix(test.in)
got := ss.String()
assert.Equal(t, test.want, got)
}
}
func TestSizeSuffixUnit(t *testing.T) {
for _, test := range []struct {
in float64
want string
}{
{0, "0 Bytes"},
{102, "102 Bytes"},
{1024, "1 kBytes"},
{1024 * 1024, "1 MBytes"},
{1024 * 1024 * 1024, "1 GBytes"},
{10 * 1024 * 1024 * 1024, "10 GBytes"},
{10.1 * 1024 * 1024 * 1024, "10.100 GBytes"},
{-1, "off"},
{-100, "off"},
} {
ss := SizeSuffix(test.in)
got := ss.Unit("Bytes")
assert.Equal(t, test.want, got)
}
}
func TestSizeSuffixSet(t *testing.T) {
for _, test := range []struct {
in string
want int64
err bool
}{
{"0", 0, false},
{"1b", 1, false},
{"102B", 102, false},
{"0.1k", 102, false},
{"0.1", 102, false},
{"1K", 1024, false},
{"1", 1024, false},
{"2.5", 1024 * 2.5, false},
{"1M", 1024 * 1024, false},
{"1.g", 1024 * 1024 * 1024, false},
{"10G", 10 * 1024 * 1024 * 1024, false},
{"off", -1, false},
{"OFF", -1, false},
{"", 0, true},
{"1p", 0, true},
{"1.p", 0, true},
{"1p", 0, true},
{"-1K", 0, true},
} {
ss := SizeSuffix(0)
err := ss.Set(test.in)
if test.err {
require.Error(t, err)
} else {
require.NoError(t, err)
}
assert.Equal(t, test.want, int64(ss))
}
}


@@ -1,65 +1,68 @@
// Implementation of sync/copy/move
package fs
// Package sync is the implementation of sync/copy/move
package sync
import (
"fmt"
"sort"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/march"
"github.com/ncw/rclone/fs/operations"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
var oldSyncMethod = BoolP("old-sync-method", "", false, "Deprecated - use --fast-list instead")
type syncCopyMove struct {
// parameters
fdst Fs
fsrc Fs
deleteMode DeleteMode // how we are doing deletions
fdst fs.Fs
fsrc fs.Fs
deleteMode fs.DeleteMode // how we are doing deletions
DoMove bool
deleteEmptySrcDirs bool
dir string
// internal state
ctx context.Context // internal context for controlling go-routines
cancel func() // cancel the context
noTraverse bool // if set don't traverse the dst
deletersWg sync.WaitGroup // for delete before go routine
deleteFilesCh chan Object // channel to receive deletes if delete before
trackRenames bool // set if we should do server side renames
dstFilesMu sync.Mutex // protect dstFiles
dstFiles map[string]Object // dst files, always filled
srcFiles map[string]Object // src files, only used if deleteBefore
srcFilesChan chan Object // passes src objects
srcFilesResult chan error // error result of src listing
dstFilesResult chan error // error result of dst listing
dstEmptyDirsMu sync.Mutex // protect dstEmptyDirs
dstEmptyDirs []DirEntry // potentially empty directories
srcEmptyDirsMu sync.Mutex // protect srcEmptyDirs
srcEmptyDirs []DirEntry // potentially empty directories
checkerWg sync.WaitGroup // wait for checkers
toBeChecked ObjectPairChan // checkers channel
transfersWg sync.WaitGroup // wait for transfers
toBeUploaded ObjectPairChan // copiers channel
errorMu sync.Mutex // Mutex covering the errors variables
err error // normal error from copy process
noRetryErr error // error with NoRetry set
fatalErr error // fatal error
commonHash HashType // common hash type between src and dst
renameMapMu sync.Mutex // mutex to protect the below
renameMap map[string][]Object // dst files by hash - only used by trackRenames
renamerWg sync.WaitGroup // wait for renamers
toBeRenamed ObjectPairChan // renamers channel
trackRenamesWg sync.WaitGroup // wg for background track renames
trackRenamesCh chan Object // objects are pumped in here
renameCheck []Object // accumulate files to check for rename here
backupDir Fs // place to store overwrites/deletes
suffix string // suffix to add to files placed in backupDir
ctx context.Context // internal context for controlling go-routines
cancel func() // cancel the context
noTraverse bool // if set don't traverse the dst
deletersWg sync.WaitGroup // for delete before go routine
deleteFilesCh chan fs.Object // channel to receive deletes if delete before
trackRenames bool // set if we should do server side renames
dstFilesMu sync.Mutex // protect dstFiles
dstFiles map[string]fs.Object // dst files, always filled
srcFiles map[string]fs.Object // src files, only used if deleteBefore
srcFilesChan chan fs.Object // passes src objects
srcFilesResult chan error // error result of src listing
dstFilesResult chan error // error result of dst listing
dstEmptyDirsMu sync.Mutex // protect dstEmptyDirs
dstEmptyDirs []fs.DirEntry // potentially empty directories
srcEmptyDirsMu sync.Mutex // protect srcEmptyDirs
srcEmptyDirs []fs.DirEntry // potentially empty directories
checkerWg sync.WaitGroup // wait for checkers
toBeChecked fs.ObjectPairChan // checkers channel
transfersWg sync.WaitGroup // wait for transfers
toBeUploaded fs.ObjectPairChan // copiers channel
errorMu sync.Mutex // Mutex covering the errors variables
err error // normal error from copy process
noRetryErr error // error with NoRetry set
fatalErr error // fatal error
commonHash hash.Type // common hash type between src and dst
renameMapMu sync.Mutex // mutex to protect the below
renameMap map[string][]fs.Object // dst files by hash - only used by trackRenames
renamerWg sync.WaitGroup // wait for renamers
toBeRenamed fs.ObjectPairChan // renamers channel
trackRenamesWg sync.WaitGroup // wg for background track renames
trackRenamesCh chan fs.Object // objects are pumped in here
renameCheck []fs.Object // accumulate files to check for rename here
backupDir fs.Fs // place to store overwrites/deletes
suffix string // suffix to add to files placed in backupDir
}
func newSyncCopyMove(fdst, fsrc Fs, deleteMode DeleteMode, DoMove bool, deleteEmptySrcDirs bool) (*syncCopyMove, error) {
func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) (*syncCopyMove, error) {
s := &syncCopyMove{
fdst: fdst,
fsrc: fsrc,
@@ -67,64 +70,64 @@ func newSyncCopyMove(fdst, fsrc Fs, deleteMode DeleteMode, DoMove bool, deleteEm
DoMove: DoMove,
deleteEmptySrcDirs: deleteEmptySrcDirs,
dir: "",
srcFilesChan: make(chan Object, Config.Checkers+Config.Transfers),
srcFilesChan: make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers),
srcFilesResult: make(chan error, 1),
dstFilesResult: make(chan error, 1),
noTraverse: Config.NoTraverse,
toBeChecked: make(ObjectPairChan, Config.Transfers),
toBeUploaded: make(ObjectPairChan, Config.Transfers),
deleteFilesCh: make(chan Object, Config.Checkers),
trackRenames: Config.TrackRenames,
noTraverse: fs.Config.NoTraverse,
toBeChecked: make(fs.ObjectPairChan, fs.Config.Transfers),
toBeUploaded: make(fs.ObjectPairChan, fs.Config.Transfers),
deleteFilesCh: make(chan fs.Object, fs.Config.Checkers),
trackRenames: fs.Config.TrackRenames,
commonHash: fsrc.Hashes().Overlap(fdst.Hashes()).GetOne(),
toBeRenamed: make(ObjectPairChan, Config.Transfers),
trackRenamesCh: make(chan Object, Config.Checkers),
toBeRenamed: make(fs.ObjectPairChan, fs.Config.Transfers),
trackRenamesCh: make(chan fs.Object, fs.Config.Checkers),
}
s.ctx, s.cancel = context.WithCancel(context.Background())
if s.noTraverse && s.deleteMode != DeleteModeOff {
Errorf(nil, "Ignoring --no-traverse with sync")
if s.noTraverse && s.deleteMode != fs.DeleteModeOff {
fs.Errorf(nil, "Ignoring --no-traverse with sync")
s.noTraverse = false
}
if s.trackRenames {
// Don't track renames for remotes without server-side move support.
if !CanServerSideMove(fdst) {
Errorf(fdst, "Ignoring --track-renames as the destination does not support server-side move or copy")
if !operations.CanServerSideMove(fdst) {
fs.Errorf(fdst, "Ignoring --track-renames as the destination does not support server-side move or copy")
s.trackRenames = false
}
if s.commonHash == HashNone {
Errorf(fdst, "Ignoring --track-renames as the source and destination do not have a common hash")
if s.commonHash == hash.HashNone {
fs.Errorf(fdst, "Ignoring --track-renames as the source and destination do not have a common hash")
s.trackRenames = false
}
}
if s.trackRenames {
// track renames needs delete after
if s.deleteMode != DeleteModeOff {
s.deleteMode = DeleteModeAfter
if s.deleteMode != fs.DeleteModeOff {
s.deleteMode = fs.DeleteModeAfter
}
if s.noTraverse {
Errorf(nil, "Ignoring --no-traverse with --track-renames")
fs.Errorf(nil, "Ignoring --no-traverse with --track-renames")
s.noTraverse = false
}
}
// Make Fs for --backup-dir if required
if Config.BackupDir != "" {
if fs.Config.BackupDir != "" {
var err error
s.backupDir, err = NewFs(Config.BackupDir)
s.backupDir, err = fs.NewFs(fs.Config.BackupDir)
if err != nil {
return nil, FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", Config.BackupDir, err))
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", fs.Config.BackupDir, err))
}
if !CanServerSideMove(s.backupDir) {
return nil, FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy"))
if !operations.CanServerSideMove(s.backupDir) {
return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy"))
}
if !SameConfig(fdst, s.backupDir) {
return nil, FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
if !operations.SameConfig(fdst, s.backupDir) {
return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
}
if Overlapping(fdst, s.backupDir) {
return nil, FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap"))
if operations.Overlapping(fdst, s.backupDir) {
return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap"))
}
if Overlapping(fsrc, s.backupDir) {
return nil, FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
if operations.Overlapping(fsrc, s.backupDir) {
return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
}
s.suffix = Config.Suffix
s.suffix = fs.Config.Suffix
}
return s, nil
}
@@ -141,7 +144,7 @@ func (s *syncCopyMove) aborting() bool {
// This reads the map and pumps it into the channel passed in, closing
// the channel at the end
func (s *syncCopyMove) pumpMapToChan(files map[string]Object, out chan<- Object) {
func (s *syncCopyMove) pumpMapToChan(files map[string]fs.Object, out chan<- fs.Object) {
outer:
for _, o := range files {
if s.aborting() {
@@ -157,62 +160,6 @@ outer:
s.srcFilesResult <- nil
}
// NeedTransfer checks to see if src needs to be copied to dst using
// the current config.
//
// Returns a flag which indicates whether the file needs to be
// transferred or not.
func NeedTransfer(dst, src Object) bool {
if dst == nil {
Debugf(src, "Couldn't find file - need to transfer")
return true
}
// If we should ignore existing files, don't transfer
if Config.IgnoreExisting {
Debugf(src, "Destination exists, skipping")
return false
}
// If we should upload unconditionally
if Config.IgnoreTimes {
Debugf(src, "Transferring unconditionally as --ignore-times is in use")
return true
}
// If UpdateOlder is in effect, skip if dst is newer than src
if Config.UpdateOlder {
srcModTime := src.ModTime()
dstModTime := dst.ModTime()
dt := dstModTime.Sub(srcModTime)
// If have a mutually agreed precision then use that
modifyWindow := Config.ModifyWindow
if modifyWindow == ModTimeNotSupported {
// Otherwise use 1 second as a safe default as
// the resolution of the time a file was
// uploaded.
modifyWindow = time.Second
}
switch {
case dt >= modifyWindow:
Debugf(src, "Destination is newer than source, skipping")
return false
case dt <= -modifyWindow:
Debugf(src, "Destination is older than source, transferring")
default:
if src.Size() == dst.Size() {
Debugf(src, "Destination mod time is within %v of source and sizes identical, skipping", modifyWindow)
return false
}
Debugf(src, "Destination mod time is within %v of source but sizes differ, transferring", modifyWindow)
}
} else {
// Check to see if changed or not
if Equal(src, dst) {
Debugf(src, "Unchanged skipping")
return false
}
}
return true
}
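The UpdateOlder branch above condenses to a small decision table over dt = dstModTime - srcModTime and the modify window w (1 second when the remotes share no agreed precision). A hedged sketch of calling the relocated helper, with dst, src and fdst as in the surrounding code:

// dt >= w                -> dst newer: skip
// dt <= -w               -> dst older: transfer
// |dt| < w, sizes equal  -> skip
// |dt| < w, sizes differ -> transfer
if operations.NeedTransfer(dst, src) { // dst may be nil if it doesn't exist yet
	_, err := operations.Copy(fdst, dst, src.Remote(), src)
	_ = err // handle as appropriate
}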
// This checks the types of errors returned while copying files
func (s *syncCopyMove) processError(err error) {
if err == nil {
@@ -221,12 +168,12 @@ func (s *syncCopyMove) processError(err error) {
s.errorMu.Lock()
defer s.errorMu.Unlock()
switch {
case IsFatalError(err):
case fserrors.IsFatalError(err):
if !s.aborting() {
s.cancel()
}
s.fatalErr = err
case IsNoRetryError(err):
case fserrors.IsNoRetryError(err):
s.noRetryErr = err
default:
s.err = err
@@ -252,7 +199,7 @@ func (s *syncCopyMove) currentError() error {
// pairChecker reads Objects on in and sends them to out if they need transferring.
//
// FIXME potentially doing lots of hashes at once
func (s *syncCopyMove) pairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
func (s *syncCopyMove) pairChecker(in fs.ObjectPairChan, out fs.ObjectPairChan, wg *sync.WaitGroup) {
defer wg.Done()
for {
if s.aborting() {
@@ -263,26 +210,26 @@ func (s *syncCopyMove) pairChecker(in ObjectPairChan, out ObjectPairChan, wg *sy
if !ok {
return
}
src := pair.src
Stats.Checking(src.Remote())
src := pair.Src
accounting.Stats.Checking(src.Remote())
// Check to see if can store this
if src.Storable() {
if NeedTransfer(pair.dst, pair.src) {
if operations.NeedTransfer(pair.Dst, pair.Src) {
// If files are treated as immutable, fail if destination exists and does not match
if Config.Immutable && pair.dst != nil {
Errorf(pair.dst, "Source and destination exist but do not match: immutable file modified")
s.processError(ErrorImmutableModified)
if fs.Config.Immutable && pair.Dst != nil {
fs.Errorf(pair.Dst, "Source and destination exist but do not match: immutable file modified")
s.processError(fs.ErrorImmutableModified)
} else {
// If destination already exists, then we must move it into --backup-dir if required
if pair.dst != nil && s.backupDir != nil {
remoteWithSuffix := pair.dst.Remote() + s.suffix
if pair.Dst != nil && s.backupDir != nil {
remoteWithSuffix := pair.Dst.Remote() + s.suffix
overwritten, _ := s.backupDir.NewObject(remoteWithSuffix)
_, err := Move(s.backupDir, overwritten, remoteWithSuffix, pair.dst)
_, err := operations.Move(s.backupDir, overwritten, remoteWithSuffix, pair.Dst)
if err != nil {
s.processError(err)
} else {
// If successful zero out the dst as it is no longer there and copy the file
pair.dst = nil
pair.Dst = nil
out <- pair
}
} else {
@@ -293,11 +240,11 @@ func (s *syncCopyMove) pairChecker(in ObjectPairChan, out ObjectPairChan, wg *sy
// If moving need to delete the files we don't need to copy
if s.DoMove {
// Delete src if no error on copy
s.processError(DeleteFile(src))
s.processError(operations.DeleteFile(src))
}
}
}
Stats.DoneChecking(src.Remote())
accounting.Stats.DoneChecking(src.Remote())
case <-s.ctx.Done():
return
}
@@ -306,7 +253,7 @@ func (s *syncCopyMove) pairChecker(in ObjectPairChan, out ObjectPairChan, wg *sy
// pairRenamer reads Objects on in and attempts to rename them,
// otherwise it sends them out if they need transferring.
func (s *syncCopyMove) pairRenamer(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
func (s *syncCopyMove) pairRenamer(in fs.ObjectPairChan, out fs.ObjectPairChan, wg *sync.WaitGroup) {
defer wg.Done()
for {
if s.aborting() {
@@ -317,7 +264,7 @@ func (s *syncCopyMove) pairRenamer(in ObjectPairChan, out ObjectPairChan, wg *sy
if !ok {
return
}
src := pair.src
src := pair.Src
if !s.tryRename(src) {
// pass on if not renamed
out <- pair
@@ -329,7 +276,7 @@ func (s *syncCopyMove) pairRenamer(in ObjectPairChan, out ObjectPairChan, wg *sy
}
// pairCopyOrMove reads Objects on in and moves or copies them.
func (s *syncCopyMove) pairCopyOrMove(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
func (s *syncCopyMove) pairCopyOrMove(in fs.ObjectPairChan, fdst fs.Fs, wg *sync.WaitGroup) {
defer wg.Done()
var err error
for {
@@ -341,15 +288,15 @@ func (s *syncCopyMove) pairCopyOrMove(in ObjectPairChan, fdst Fs, wg *sync.WaitG
if !ok {
return
}
src := pair.src
Stats.Transferring(src.Remote())
src := pair.Src
accounting.Stats.Transferring(src.Remote())
if s.DoMove {
_, err = Move(fdst, pair.dst, src.Remote(), src)
_, err = operations.Move(fdst, pair.Dst, src.Remote(), src)
} else {
_, err = Copy(fdst, pair.dst, src.Remote(), src)
_, err = operations.Copy(fdst, pair.Dst, src.Remote(), src)
}
s.processError(err)
Stats.DoneTransferring(src.Remote(), err == nil)
accounting.Stats.DoneTransferring(src.Remote(), err == nil)
case <-s.ctx.Done():
return
}
@@ -358,8 +305,8 @@ func (s *syncCopyMove) pairCopyOrMove(in ObjectPairChan, fdst Fs, wg *sync.WaitG
// This starts the background checkers.
func (s *syncCopyMove) startCheckers() {
s.checkerWg.Add(Config.Checkers)
for i := 0; i < Config.Checkers; i++ {
s.checkerWg.Add(fs.Config.Checkers)
for i := 0; i < fs.Config.Checkers; i++ {
go s.pairChecker(s.toBeChecked, s.toBeUploaded, &s.checkerWg)
}
}
@@ -367,14 +314,14 @@ func (s *syncCopyMove) startCheckers() {
// This stops the background checkers
func (s *syncCopyMove) stopCheckers() {
close(s.toBeChecked)
Infof(s.fdst, "Waiting for checks to finish")
fs.Infof(s.fdst, "Waiting for checks to finish")
s.checkerWg.Wait()
}
// This starts the background transfers
func (s *syncCopyMove) startTransfers() {
s.transfersWg.Add(Config.Transfers)
for i := 0; i < Config.Transfers; i++ {
s.transfersWg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
go s.pairCopyOrMove(s.toBeUploaded, s.fdst, &s.transfersWg)
}
}
@@ -382,7 +329,7 @@ func (s *syncCopyMove) startTransfers() {
// This stops the background transfers
func (s *syncCopyMove) stopTransfers() {
close(s.toBeUploaded)
Infof(s.fdst, "Waiting for transfers to finish")
fs.Infof(s.fdst, "Waiting for transfers to finish")
s.transfersWg.Wait()
}
@@ -391,8 +338,8 @@ func (s *syncCopyMove) startRenamers() {
if !s.trackRenames {
return
}
s.renamerWg.Add(Config.Checkers)
for i := 0; i < Config.Checkers; i++ {
s.renamerWg.Add(fs.Config.Checkers)
for i := 0; i < fs.Config.Checkers; i++ {
go s.pairRenamer(s.toBeRenamed, s.toBeUploaded, &s.renamerWg)
}
}
@@ -403,7 +350,7 @@ func (s *syncCopyMove) stopRenamers() {
return
}
close(s.toBeRenamed)
Infof(s.fdst, "Waiting for renames to finish")
fs.Infof(s.fdst, "Waiting for renames to finish")
s.renamerWg.Wait()
}
@@ -432,20 +379,20 @@ func (s *syncCopyMove) stopTrackRenames() {
// This starts the background deletion of files for --delete-during
func (s *syncCopyMove) startDeleters() {
if s.deleteMode != DeleteModeDuring && s.deleteMode != DeleteModeOnly {
if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
return
}
s.deletersWg.Add(1)
go func() {
defer s.deletersWg.Done()
err := deleteFilesWithBackupDir(s.deleteFilesCh, s.backupDir)
err := operations.DeleteFilesWithBackupDir(s.deleteFilesCh, s.backupDir)
s.processError(err)
}()
}
// This stops the background deleters
func (s *syncCopyMove) stopDeleters() {
if s.deleteMode != DeleteModeDuring && s.deleteMode != DeleteModeOnly {
if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
return
}
close(s.deleteFilesCh)
@@ -458,13 +405,13 @@ func (s *syncCopyMove) stopDeleters() {
checkSrcMap is clear then it assumes that any source files that
have been found have been removed from dstFiles already.
func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
if Stats.Errored() {
Errorf(s.fdst, "%v", ErrorNotDeleting)
return ErrorNotDeleting
if accounting.Stats.Errored() {
fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
return fs.ErrorNotDeleting
}
// Delete the spare files
toDelete := make(ObjectsChan, Config.Transfers)
toDelete := make(fs.ObjectsChan, fs.Config.Transfers)
go func() {
for remote, o := range s.dstFiles {
if checkSrcMap {
@@ -480,18 +427,18 @@ func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
}
close(toDelete)
}()
return deleteFilesWithBackupDir(toDelete, s.backupDir)
return operations.DeleteFilesWithBackupDir(toDelete, s.backupDir)
}
// This deletes the empty directories in the slice passed in. It
// ignores any errors deleting directories
func deleteEmptyDirectories(f Fs, entries DirEntries) error {
func deleteEmptyDirectories(f fs.Fs, entries fs.DirEntries) error {
if len(entries) == 0 {
return nil
}
if Stats.Errored() {
Errorf(f, "%v", ErrorNotDeletingDirs)
return ErrorNotDeletingDirs
if accounting.Stats.Errored() {
fs.Errorf(f, "%v", fs.ErrorNotDeletingDirs)
return fs.ErrorNotDeletingDirs
}
// Now delete the empty directories starting from the longest path
@@ -500,25 +447,25 @@ func deleteEmptyDirectories(f Fs, entries DirEntries) error {
var okCount int
for i := len(entries) - 1; i >= 0; i-- {
entry := entries[i]
dir, ok := entry.(Directory)
dir, ok := entry.(fs.Directory)
if ok {
// TryRmdir only deletes empty directories
err := TryRmdir(f, dir.Remote())
err := operations.TryRmdir(f, dir.Remote())
if err != nil {
Debugf(logDirName(f, dir.Remote()), "Failed to Rmdir: %v", err)
fs.Debugf(fs.LogDirName(f, dir.Remote()), "Failed to Rmdir: %v", err)
errorCount++
} else {
okCount++
}
} else {
Errorf(f, "Not a directory: %v", entry)
fs.Errorf(f, "Not a directory: %v", entry)
}
}
if errorCount > 0 {
Debugf(f, "failed to delete %d directories", errorCount)
fs.Debugf(f, "failed to delete %d directories", errorCount)
}
if okCount > 0 {
Debugf(f, "deleted %d directories", okCount)
fs.Debugf(f, "deleted %d directories", okCount)
}
return nil
}
@@ -526,11 +473,11 @@ func deleteEmptyDirectories(f Fs, entries DirEntries) error {
// renameHash makes a string with the size and the hash for rename detection
//
// it may return an empty string in which case no hash could be made
func (s *syncCopyMove) renameHash(obj Object) (hash string) {
func (s *syncCopyMove) renameHash(obj fs.Object) (hash string) {
var err error
hash, err = obj.Hash(s.commonHash)
if err != nil {
Debugf(obj, "Hash failed: %v", err)
fs.Debugf(obj, "Hash failed: %v", err)
return ""
}
if hash == "" {
@@ -540,7 +487,7 @@ func (s *syncCopyMove) renameHash(obj Object) (hash string) {
}
// pushRenameMap adds the object with hash to the rename map
func (s *syncCopyMove) pushRenameMap(hash string, obj Object) {
func (s *syncCopyMove) pushRenameMap(hash string, obj fs.Object) {
s.renameMapMu.Lock()
s.renameMap[hash] = append(s.renameMap[hash], obj)
s.renameMapMu.Unlock()
@@ -548,7 +495,7 @@ func (s *syncCopyMove) pushRenameMap(hash string, obj Object) {
// popRenameMap finds the object with hash and pops the first match from
// renameMap or returns nil if not found.
func (s *syncCopyMove) popRenameMap(hash string) (dst Object) {
func (s *syncCopyMove) popRenameMap(hash string) (dst fs.Object) {
s.renameMapMu.Lock()
dsts, ok := s.renameMap[hash]
if ok && len(dsts) > 0 {
@@ -566,7 +513,7 @@ func (s *syncCopyMove) popRenameMap(hash string) (dst Object) {
// makeRenameMap builds a map of the destination files by hash that
// match sizes in the slice of objects in s.renameCheck
func (s *syncCopyMove) makeRenameMap() {
Infof(s.fdst, "Making map for --track-renames")
fs.Infof(s.fdst, "Making map for --track-renames")
// first make a map of possible sizes we need to check
possibleSizes := map[int64]struct{}{}
@@ -575,38 +522,38 @@ func (s *syncCopyMove) makeRenameMap() {
}
// pump all the dstFiles into in
in := make(chan Object, Config.Checkers)
in := make(chan fs.Object, fs.Config.Checkers)
go s.pumpMapToChan(s.dstFiles, in)
// now make a map of size,hash for all dstFiles
s.renameMap = make(map[string][]Object)
s.renameMap = make(map[string][]fs.Object)
var wg sync.WaitGroup
wg.Add(Config.Transfers)
for i := 0; i < Config.Transfers; i++ {
wg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
go func() {
defer wg.Done()
for obj := range in {
// only create hash for dst Object if its size could match
// only create hash for dst fs.Object if its size could match
if _, found := possibleSizes[obj.Size()]; found {
Stats.Checking(obj.Remote())
accounting.Stats.Checking(obj.Remote())
hash := s.renameHash(obj)
if hash != "" {
s.pushRenameMap(hash, obj)
}
Stats.DoneChecking(obj.Remote())
accounting.Stats.DoneChecking(obj.Remote())
}
}
}()
}
wg.Wait()
Infof(s.fdst, "Finished making map for --track-renames")
fs.Infof(s.fdst, "Finished making map for --track-renames")
}
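To recap the flow just built: dst objects whose sizes occur among the rename candidates are hashed and bucketed under a size-plus-hash key, and a src file that lands in a non-empty bucket can be renamed server side instead of re-uploaded. A standalone toy of the bucket-and-pop mechanics (the key format here is illustrative only):

package main

import "fmt"

func main() {
	// Buckets of dst remotes keyed by content (size plus hash in the real code).
	renameMap := map[string][]string{
		"11,5eb63bbb": {"old/name.txt"},
	}
	pop := func(key string) (string, bool) {
		dsts := renameMap[key]
		if len(dsts) == 0 {
			return "", false
		}
		renameMap[key] = dsts[1:]
		return dsts[0], true
	}
	// A src object hashing to the same key is a rename candidate.
	if dst, ok := pop("11,5eb63bbb"); ok {
		fmt.Println("rename candidate:", dst)
	}
}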
// tryRename renames a src object when doing track renames if
// possible. It returns true if the object was renamed.
func (s *syncCopyMove) tryRename(src Object) bool {
Stats.Checking(src.Remote())
defer Stats.DoneChecking(src.Remote())
func (s *syncCopyMove) tryRename(src fs.Object) bool {
accounting.Stats.Checking(src.Remote())
defer accounting.Stats.DoneChecking(src.Remote())
// Calculate the hash of the src object
hash := s.renameHash(src)
@@ -624,9 +571,9 @@ func (s *syncCopyMove) tryRename(src Object) bool {
dstOverwritten, _ := s.fdst.NewObject(src.Remote())
// Rename dst to have name src.Remote()
_, err := Move(s.fdst, dstOverwritten, src.Remote(), dst)
_, err := operations.Move(s.fdst, dstOverwritten, src.Remote(), dst)
if err != nil {
Debugf(src, "Failed to rename to %q: %v", dst.Remote(), err)
fs.Debugf(src, "Failed to rename to %q: %v", dst.Remote(), err)
return false
}
@@ -635,7 +582,7 @@ func (s *syncCopyMove) tryRename(src Object) bool {
delete(s.dstFiles, dst.Remote())
s.dstFilesMu.Unlock()
Infof(src, "Renamed from %q", dst.Remote())
fs.Infof(src, "Renamed from %q", dst.Remote())
return true
}
@@ -647,8 +594,8 @@ func (s *syncCopyMove) tryRename(src Object) bool {
//
// dir is the start directory, "" for root
func (s *syncCopyMove) run() error {
if Same(s.fdst, s.fsrc) {
Errorf(s.fdst, "Nothing to do as source and destination are the same")
if operations.Same(s.fdst, s.fsrc) {
fs.Errorf(s.fdst, "Nothing to do as source and destination are the same")
return nil
}
@@ -657,13 +604,13 @@ func (s *syncCopyMove) run() error {
s.startRenamers()
s.startTransfers()
s.startDeleters()
s.dstFiles = make(map[string]Object)
s.dstFiles = make(map[string]fs.Object)
s.startTrackRenames()
// set up a march over fdst and fsrc
m := newMarch(s.ctx, s.fdst, s.fsrc, s.dir, s)
m.run()
m := march.New(s.ctx, s.fdst, s.fsrc, s.dir, s)
m.Run()
s.stopTrackRenames()
if s.trackRenames {
@@ -671,7 +618,7 @@ func (s *syncCopyMove) run() error {
s.makeRenameMap()
// Attempt renames for all the files which don't have a matching dst
for _, src := range s.renameCheck {
s.toBeRenamed <- ObjectPair{src, nil}
s.toBeRenamed <- fs.ObjectPair{Src: src, Dst: nil}
}
}
@@ -682,18 +629,18 @@ func (s *syncCopyMove) run() error {
s.stopDeleters()
// Delete files after
if s.deleteMode == DeleteModeAfter {
if s.deleteMode == fs.DeleteModeAfter {
if s.currentError() != nil {
Errorf(s.fdst, "%v", ErrorNotDeleting)
fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
} else {
s.processError(s.deleteFiles(false))
}
}
// Prune empty directories
if s.deleteMode != DeleteModeOff {
if s.deleteMode != fs.DeleteModeOff {
if s.currentError() != nil {
Errorf(s.fdst, "%v", ErrorNotDeletingDirs)
fs.Errorf(s.fdst, "%v", fs.ErrorNotDeletingDirs)
} else {
s.processError(deleteEmptyDirectories(s.fdst, s.dstEmptyDirs))
}
@@ -709,24 +656,24 @@ func (s *syncCopyMove) run() error {
}
// DstOnly has an object which is in the destination only
func (s *syncCopyMove) DstOnly(dst DirEntry) (recurse bool) {
if s.deleteMode == DeleteModeOff {
func (s *syncCopyMove) DstOnly(dst fs.DirEntry) (recurse bool) {
if s.deleteMode == fs.DeleteModeOff {
return false
}
switch x := dst.(type) {
case Object:
case fs.Object:
switch s.deleteMode {
case DeleteModeAfter:
case fs.DeleteModeAfter:
// record object as needs deleting
s.dstFilesMu.Lock()
s.dstFiles[x.Remote()] = x
s.dstFilesMu.Unlock()
case DeleteModeDuring, DeleteModeOnly:
case fs.DeleteModeDuring, fs.DeleteModeOnly:
s.deleteFilesCh <- x
default:
panic(fmt.Sprintf("unexpected delete mode %d", s.deleteMode))
}
case Directory:
case fs.Directory:
// Do the same thing to the entire contents of the directory
// Record directory as it is potentially empty and needs deleting
if s.fdst.Features().CanHaveEmptyDirectories {
@@ -743,20 +690,20 @@ func (s *syncCopyMove) DstOnly(dst DirEntry) (recurse bool) {
}
// SrcOnly has an object which is in the source only
func (s *syncCopyMove) SrcOnly(src DirEntry) (recurse bool) {
if s.deleteMode == DeleteModeOnly {
func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
if s.deleteMode == fs.DeleteModeOnly {
return false
}
switch x := src.(type) {
case Object:
case fs.Object:
if s.trackRenames {
// Save object to check for a rename later
s.trackRenamesCh <- x
} else {
// No need to check since doesn't exist
s.toBeUploaded <- ObjectPair{x, nil}
s.toBeUploaded <- fs.ObjectPair{Src: x, Dst: nil}
}
case Directory:
case fs.Directory:
// Do the same thing to the entire contents of the directory
// Record the directory for deletion
s.srcEmptyDirsMu.Lock()
@@ -770,24 +717,24 @@ func (s *syncCopyMove) SrcOnly(src DirEntry) (recurse bool) {
}
// Match is called when src and dst are present, so sync src to dst
func (s *syncCopyMove) Match(dst, src DirEntry) (recurse bool) {
func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
switch srcX := src.(type) {
case Object:
if s.deleteMode == DeleteModeOnly {
case fs.Object:
if s.deleteMode == fs.DeleteModeOnly {
return false
}
dstX, ok := dst.(Object)
dstX, ok := dst.(fs.Object)
if ok {
s.toBeChecked <- ObjectPair{srcX, dstX}
s.toBeChecked <- fs.ObjectPair{Src: srcX, Dst: dstX}
} else {
// FIXME src is file, dst is directory
err := errors.New("can't overwrite directory with file")
Errorf(dst, "%v", err)
fs.Errorf(dst, "%v", err)
s.processError(err)
}
case Directory:
case fs.Directory:
// Do the same thing to the entire contents of the directory
_, ok := dst.(Directory)
_, ok := dst.(fs.Directory)
if ok {
// Record the src directory for deletion
s.srcEmptyDirsMu.Lock()
@@ -797,7 +744,7 @@ func (s *syncCopyMove) Match(dst, src DirEntry) (recurse bool) {
}
// FIXME src is dir, dst is file
err := errors.New("can't overwrite file with directory")
Errorf(dst, "%v", err)
fs.Errorf(dst, "%v", err)
s.processError(err)
default:
panic("Bad object in DirEntries")
@@ -812,20 +759,17 @@ func (s *syncCopyMove) Match(dst, src DirEntry) (recurse bool) {
// If DoMove is true then files will be moved instead of copied
//
// dir is the start directory, "" for root
func runSyncCopyMove(fdst, fsrc Fs, deleteMode DeleteMode, DoMove bool, deleteEmptySrcDirs bool) error {
if *oldSyncMethod {
return FatalError(errors.New("--old-sync-method is deprecated use --fast-list instead"))
}
if deleteMode != DeleteModeOff && DoMove {
return FatalError(errors.New("can't delete and move at the same time"))
func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) error {
if deleteMode != fs.DeleteModeOff && DoMove {
return fserrors.FatalError(errors.New("can't delete and move at the same time"))
}
// Run an extra pass to delete only
if deleteMode == DeleteModeBefore {
if Config.TrackRenames {
return FatalError(errors.New("can't use --delete-before with --track-renames"))
if deleteMode == fs.DeleteModeBefore {
if fs.Config.TrackRenames {
return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
}
// only delete stuff in this pass
do, err := newSyncCopyMove(fdst, fsrc, DeleteModeOnly, false, deleteEmptySrcDirs)
do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs)
if err != nil {
return err
}
@@ -834,7 +778,7 @@ func runSyncCopyMove(fdst, fsrc Fs, deleteMode DeleteMode, DoMove bool, deleteEm
return err
}
// Next pass does a copy only
deleteMode = DeleteModeOff
deleteMode = fs.DeleteModeOff
}
do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs)
if err != nil {
@@ -844,52 +788,52 @@ func runSyncCopyMove(fdst, fsrc Fs, deleteMode DeleteMode, DoMove bool, deleteEm
}
// Sync fsrc into fdst
func Sync(fdst, fsrc Fs) error {
return runSyncCopyMove(fdst, fsrc, Config.DeleteMode, false, false)
func Sync(fdst, fsrc fs.Fs) error {
return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false)
}
// CopyDir copies fsrc into fdst
func CopyDir(fdst, fsrc Fs) error {
return runSyncCopyMove(fdst, fsrc, DeleteModeOff, false, false)
func CopyDir(fdst, fsrc fs.Fs) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false)
}
// moveDir moves fsrc into fdst
func moveDir(fdst, fsrc Fs, deleteEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, DeleteModeOff, true, deleteEmptySrcDirs)
func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs)
}
// MoveDir moves fsrc into fdst
func MoveDir(fdst, fsrc Fs, deleteEmptySrcDirs bool) error {
if Same(fdst, fsrc) {
Errorf(fdst, "Nothing to do as source and destination are the same")
func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
if operations.Same(fdst, fsrc) {
fs.Errorf(fdst, "Nothing to do as source and destination are the same")
return nil
}
// First attempt to use DirMover if it exists, the Fs are the same and no filters are active
if fdstDirMove := fdst.Features().DirMove; fdstDirMove != nil && SameConfig(fsrc, fdst) && Config.Filter.InActive() {
if Config.DryRun {
Logf(fdst, "Not doing server side directory move as --dry-run")
if fdstDirMove := fdst.Features().DirMove; fdstDirMove != nil && operations.SameConfig(fsrc, fdst) && filter.Active.InActive() {
if fs.Config.DryRun {
fs.Logf(fdst, "Not doing server side directory move as --dry-run")
return nil
}
Debugf(fdst, "Using server side directory move")
fs.Debugf(fdst, "Using server side directory move")
err := fdstDirMove(fsrc, "", "")
switch err {
case ErrorCantDirMove, ErrorDirExists:
Infof(fdst, "Server side directory move failed - fallback to file moves: %v", err)
case fs.ErrorCantDirMove, fs.ErrorDirExists:
fs.Infof(fdst, "Server side directory move failed - fallback to file moves: %v", err)
case nil:
Infof(fdst, "Server side directory move succeeded")
fs.Infof(fdst, "Server side directory move succeeded")
return nil
default:
Stats.Error(err)
Errorf(fdst, "Server side directory move failed: %v", err)
fs.CountError(err)
fs.Errorf(fdst, "Server side directory move failed: %v", err)
return err
}
}
// The two remotes mustn't overlap if we didn't do server side move
if Overlapping(fdst, fsrc) {
err := ErrorCantMoveOverlapping
Errorf(fdst, "%v", err)
if operations.Overlapping(fdst, fsrc) {
err := fs.ErrorCantMoveOverlapping
fs.Errorf(fdst, "%v", err)
return err
}
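With the split complete, callers drive the new sync package through its exports over plain fs.Fs values. A minimal sketch, assuming local paths for both remotes:

package main

import (
	"log"

	_ "github.com/ncw/rclone/backend/all" // register backends
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/sync"
)

func main() {
	fsrc, err := fs.NewFs("/tmp/src")
	if err != nil {
		log.Fatal(err)
	}
	fdst, err := fs.NewFs("/tmp/dst")
	if err != nil {
		log.Fatal(err)
	}
	// Sync makes fdst identical to fsrc, deleting per fs.Config.DeleteMode;
	// CopyDir copies without deleting; MoveDir moves.
	if err := sync.Sync(fdst, fsrc); err != nil {
		log.Fatal(err)
	}
}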


@@ -1,19 +1,36 @@
// Test sync/copy/move
package fs_test
package sync
import (
"runtime"
"testing"
"time"
_ "github.com/ncw/rclone/backend/all" // import all backends
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/unicode/norm"
)
// Some times used in the tests
var (
t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
t2 = fstest.Time("2011-12-25T12:59:59.123456789Z")
t3 = fstest.Time("2011-12-30T12:59:59.000000000Z")
)
// TestMain drives the tests
func TestMain(m *testing.M) {
fstest.TestMain(m)
}
// Check dry run is working
func TestCopyWithDryRun(t *testing.T) {
r := fstest.NewRun(t)
@@ -22,7 +39,7 @@ func TestCopyWithDryRun(t *testing.T) {
r.Mkdir(r.Fremote)
fs.Config.DryRun = true
err := fs.CopyDir(r.Fremote, r.Flocal)
err := CopyDir(r.Fremote, r.Flocal)
fs.Config.DryRun = false
require.NoError(t, err)
@@ -37,7 +54,7 @@ func TestCopy(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
r.Mkdir(r.Fremote)
err := fs.CopyDir(r.Fremote, r.Flocal)
err := CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -54,7 +71,7 @@ func TestCopyNoTraverse(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
err := fs.CopyDir(r.Fremote, r.Flocal)
err := CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -71,8 +88,8 @@ func TestSyncNoTraverse(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -90,7 +107,7 @@ func TestCopyWithDepth(t *testing.T) {
fs.Config.MaxDepth = 1
defer func() { fs.Config.MaxDepth = -1 }()
err := fs.CopyDir(r.Fremote, r.Flocal)
err := CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1, file2)
@@ -109,7 +126,7 @@ func TestServerSideCopy(t *testing.T) {
defer finaliseCopy()
t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy)
err = fs.CopyDir(FremoteCopy, r.Fremote)
err = CopyDir(FremoteCopy, r.Fremote)
require.NoError(t, err)
fstest.CheckItems(t, FremoteCopy, file1)
@@ -124,10 +141,10 @@ func TestCopyAfterDelete(t *testing.T) {
fstest.CheckItems(t, r.Flocal)
fstest.CheckItems(t, r.Fremote, file1)
err := fs.Mkdir(r.Flocal, "")
err := operations.Mkdir(r.Flocal, "")
require.NoError(t, err)
err = fs.CopyDir(r.Fremote, r.Flocal)
err = CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal)
@@ -141,7 +158,7 @@ func TestCopyRedownload(t *testing.T) {
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
fstest.CheckItems(t, r.Fremote, file1)
err := fs.CopyDir(r.Flocal, r.Fremote)
err := CopyDir(r.Flocal, r.Fremote)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -159,24 +176,24 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
file1 := r.WriteFile("check sum", "", t1)
fstest.CheckItems(t, r.Flocal, file1)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred exactly one file.
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
fstest.CheckItems(t, r.Fremote, file1)
// Change last modified date only
file2 := r.WriteFile("check sum", "", t2)
fstest.CheckItems(t, r.Flocal, file2)
fs.Stats.ResetCounters()
err = fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred no files
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
assert.Equal(t, int64(0), accounting.Stats.GetTransfers())
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file1)
}
@@ -193,24 +210,24 @@ func TestSyncSizeOnly(t *testing.T) {
file1 := r.WriteFile("sizeonly", "potato", t1)
fstest.CheckItems(t, r.Flocal, file1)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred exactly one file.
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
fstest.CheckItems(t, r.Fremote, file1)
// Update mtime, md5sum but not length of file
file2 := r.WriteFile("sizeonly", "POTATO", t2)
fstest.CheckItems(t, r.Flocal, file2)
fs.Stats.ResetCounters()
err = fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred no files
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
assert.Equal(t, int64(0), accounting.Stats.GetTransfers())
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file1)
}
@@ -227,24 +244,24 @@ func TestSyncIgnoreSize(t *testing.T) {
file1 := r.WriteFile("ignore-size", "contents", t1)
fstest.CheckItems(t, r.Flocal, file1)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred exactly one file.
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
fstest.CheckItems(t, r.Fremote, file1)
// Update size but not date of file
file2 := r.WriteFile("ignore-size", "longer contents but same date", t1)
fstest.CheckItems(t, r.Flocal, file2)
fs.Stats.ResetCounters()
err = fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred no files
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
assert.Equal(t, int64(0), accounting.Stats.GetTransfers())
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file1)
}
@@ -255,24 +272,24 @@ func TestSyncIgnoreTimes(t *testing.T) {
file1 := r.WriteBoth("existing", "potato", t1)
fstest.CheckItems(t, r.Fremote, file1)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred exactly 0 files because the
// files were identical.
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
assert.Equal(t, int64(0), accounting.Stats.GetTransfers())
fs.Config.IgnoreTimes = true
defer func() { fs.Config.IgnoreTimes = false }()
fs.Stats.ResetCounters()
err = fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred exactly one file even though the
// files were identical.
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file1)
@@ -286,16 +303,16 @@ func TestSyncIgnoreExisting(t *testing.T) {
fs.Config.IgnoreExisting = true
defer func() { fs.Config.IgnoreExisting = false }()
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file1)
// Change everything
r.WriteFile("existing", "newpotatoes", t2)
fs.Stats.ResetCounters()
err = fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// Items should not change
fstest.CheckItems(t, r.Fremote, file1)
@@ -313,8 +330,8 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
fs.Config.DryRun = true
defer func() { fs.Config.DryRun = false }()
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -322,8 +339,8 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
fs.Config.DryRun = false
fs.Stats.ResetCounters()
err = fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -350,8 +367,8 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file2)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -371,15 +388,15 @@ func TestSyncDoesntUpdateModtime(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file2)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file1)
// We should have transferred exactly one file, not set the mod time
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
}
func TestSyncAfterAddingAFile(t *testing.T) {
@@ -391,8 +408,8 @@ func TestSyncAfterAddingAFile(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1, file2)
fstest.CheckItems(t, r.Fremote, file1)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1, file2)
fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -406,8 +423,8 @@ func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)
fstest.CheckItems(t, r.Flocal, file2)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file2)
@@ -429,8 +446,8 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)
fstest.CheckItems(t, r.Flocal, file2)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file2)
@@ -445,8 +462,8 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
file3 := r.WriteBoth("empty space", "", t2)
fs.Config.DryRun = true
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
fs.Config.DryRun = false
require.NoError(t, err)
@@ -464,8 +481,8 @@ func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2, file3)
fstest.CheckItems(t, r.Flocal, file1, file3)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1, file3)
fstest.CheckItems(t, r.Fremote, file1, file3)
@@ -478,8 +495,8 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDir(t *testing.T) {
file1 := r.WriteFile("a/potato2", "------------------------------------------------------------", t1)
file2 := r.WriteObject("b/potato", "SMALLER BUT SAME DATE", t2)
file3 := r.WriteBoth("c/non empty space", "AhHa!", t2)
require.NoError(t, fs.Mkdir(r.Fremote, "d"))
require.NoError(t, fs.Mkdir(r.Fremote, "d/e"))
require.NoError(t, operations.Mkdir(r.Fremote, "d"))
require.NoError(t, operations.Mkdir(r.Fremote, "d/e"))
fstest.CheckListingWithPrecision(
t,
@@ -510,8 +527,8 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDir(t *testing.T) {
fs.Config.ModifyWindow,
)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
@@ -549,7 +566,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
file1 := r.WriteFile("a/potato2", "------------------------------------------------------------", t1)
file2 := r.WriteObject("b/potato", "SMALLER BUT SAME DATE", t2)
file3 := r.WriteBoth("c/non empty space", "AhHa!", t2)
require.NoError(t, fs.Mkdir(r.Fremote, "d"))
require.NoError(t, operations.Mkdir(r.Fremote, "d"))
fstest.CheckListingWithPrecision(
t,
@@ -579,9 +596,9 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
fs.Config.ModifyWindow,
)
fs.Stats.ResetCounters()
fs.Stats.Error(nil)
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
fs.CountError(nil)
err := Sync(r.Fremote, r.Flocal)
assert.Equal(t, fs.ErrorNotDeleting, err)
fstest.CheckListingWithPrecision(
@@ -657,8 +674,8 @@ func TestCopyDeleteBefore(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)
fstest.CheckItems(t, r.Flocal, file2)
fs.Stats.ResetCounters()
err := fs.CopyDir(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -675,20 +692,20 @@ func TestSyncWithExclude(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1, file2)
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
fs.Config.Filter.MaxSize = 40
filter.Active.Opt.MaxSize = 40
defer func() {
fs.Config.Filter.MaxSize = -1
filter.Active.Opt.MaxSize = -1
}()
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file2, file1)
// Now sync the other way round and check enormous doesn't get
// deleted as it is excluded from the sync
fs.Stats.ResetCounters()
err = fs.Sync(r.Flocal, r.Fremote)
accounting.Stats.ResetCounters()
err = Sync(r.Flocal, r.Fremote)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2, file1, file3)
}
@@ -703,22 +720,22 @@ func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1, file2, file3)
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
fs.Config.Filter.MaxSize = 40
fs.Config.Filter.DeleteExcluded = true
filter.Active.Opt.MaxSize = 40
filter.Active.Opt.DeleteExcluded = true
defer func() {
fs.Config.Filter.MaxSize = -1
fs.Config.Filter.DeleteExcluded = false
filter.Active.Opt.MaxSize = -1
filter.Active.Opt.DeleteExcluded = false
}()
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file2)
// Check sync the other way round to make sure enormous gets
// deleted even though it is excluded
fs.Stats.ResetCounters()
err = fs.Sync(r.Flocal, r.Fremote)
accounting.Stats.ResetCounters()
err = Sync(r.Flocal, r.Fremote)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2)
}
@@ -752,8 +769,8 @@ func TestSyncWithUpdateOlder(t *testing.T) {
fs.Config.ModifyWindow = oldModifyWindow
}()
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, oneO, twoF, threeO, fourF, fiveF)
}
@@ -769,15 +786,15 @@ func TestSyncWithTrackRenames(t *testing.T) {
}()
haveHash := r.Fremote.Hashes().Overlap(r.Flocal.Hashes()).GetOne() != fs.HashNone
canTrackRenames := haveHash && fs.CanServerSideMove(r.Fremote)
haveHash := r.Fremote.Hashes().Overlap(r.Flocal.Hashes()).GetOne() != hash.HashNone
canTrackRenames := haveHash && operations.CanServerSideMove(r.Fremote)
t.Logf("Can track renames: %v", canTrackRenames)
f1 := r.WriteFile("potato", "Potato Content", t1)
f2 := r.WriteFile("yam", "Yam Content", t2)
fs.Stats.ResetCounters()
require.NoError(t, fs.Sync(r.Fremote, r.Flocal))
accounting.Stats.ResetCounters()
require.NoError(t, Sync(r.Fremote, r.Flocal))
fstest.CheckItems(t, r.Fremote, f1, f2)
fstest.CheckItems(t, r.Flocal, f1, f2)
@@ -785,15 +802,15 @@ func TestSyncWithTrackRenames(t *testing.T) {
// Now rename locally.
f2 = r.RenameFile(f2, "yaml")
fs.Stats.ResetCounters()
require.NoError(t, fs.Sync(r.Fremote, r.Flocal))
accounting.Stats.ResetCounters()
require.NoError(t, Sync(r.Fremote, r.Flocal))
fstest.CheckItems(t, r.Fremote, f1, f2)
if canTrackRenames {
assert.Equal(t, fs.Stats.GetTransfers(), int64(0))
assert.Equal(t, accounting.Stats.GetTransfers(), int64(0))
} else {
assert.Equal(t, fs.Stats.GetTransfers(), int64(1))
assert.Equal(t, accounting.Stats.GetTransfers(), int64(1))
}
}
@@ -808,7 +825,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
file3u := r.WriteBoth("potato3", "------------------------------------------------------------ UPDATED", t2)
if testDeleteEmptyDirs {
err := fs.Mkdir(r.Fremote, "tomatoDir")
err := operations.Mkdir(r.Fremote, "tomatoDir")
require.NoError(t, err)
}
@@ -822,8 +839,8 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
fstest.CheckItems(t, FremoteMove, file2, file3)
// Do server side move
fs.Stats.ResetCounters()
err = fs.MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs)
accounting.Stats.ResetCounters()
err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs)
require.NoError(t, err)
if withFilter {
@@ -844,13 +861,13 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
defer finaliseMove2()
if testDeleteEmptyDirs {
err := fs.Mkdir(FremoteMove, "tomatoDir")
err := operations.Mkdir(FremoteMove, "tomatoDir")
require.NoError(t, err)
}
// Move it back to a new empty remote, dst does not exist this time
fs.Stats.ResetCounters()
err = fs.MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs)
accounting.Stats.ResetCounters()
err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs)
require.NoError(t, err)
if withFilter {
@@ -878,9 +895,9 @@ func TestServerSideMoveWithFilter(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
fs.Config.Filter.MinSize = 40
filter.Active.Opt.MinSize = 40
defer func() {
fs.Config.Filter.MinSize = -1
filter.Active.Opt.MinSize = -1
}()
testServerSideMove(t, r, true, false)
@@ -910,15 +927,15 @@ func TestServerSideMoveOverlap(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)
// Subdir move with no filters should return ErrorCantMoveOverlapping
err = fs.MoveDir(FremoteMove, r.Fremote, false)
err = MoveDir(FremoteMove, r.Fremote, false)
assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())
// Now try with a filter which should also fail with ErrorCantMoveOverlapping
fs.Config.Filter.MinSize = 40
filter.Active.Opt.MinSize = 40
defer func() {
fs.Config.Filter.MinSize = -1
filter.Active.Opt.MinSize = -1
}()
err = fs.MoveDir(FremoteMove, r.Fremote, false)
err = MoveDir(FremoteMove, r.Fremote, false)
assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())
}
@@ -927,7 +944,7 @@ func testSyncBackupDir(t *testing.T, suffix string) {
r := fstest.NewRun(t)
defer r.Finalise()
if !fs.CanServerSideMove(r.Fremote) {
if !operations.CanServerSideMove(r.Fremote) {
t.Skip("Skipping test as remote does not support server side move")
}
r.Mkdir(r.Fremote)
@@ -953,8 +970,8 @@ func testSyncBackupDir(t *testing.T, suffix string) {
fdst, err := fs.NewFs(r.FremoteName + "/dst")
require.NoError(t, err)
fs.Stats.ResetCounters()
err = fs.Sync(fdst, r.Flocal)
accounting.Stats.ResetCounters()
err = Sync(fdst, r.Flocal)
require.NoError(t, err)
// one should be moved to the backup dir and the new one installed
@@ -974,8 +991,8 @@ func testSyncBackupDir(t *testing.T, suffix string) {
// This should delete three and overwrite one again, checking
// the files got overwritten correctly in backup-dir
fs.Stats.ResetCounters()
err = fs.Sync(fdst, r.Flocal)
accounting.Stats.ResetCounters()
err = Sync(fdst, r.Flocal)
require.NoError(t, err)
// one should be moved to the backup dir and the new one installed
@@ -1011,13 +1028,13 @@ func TestSyncUTFNorm(t *testing.T) {
file2 := r.WriteObject(Encoding2, "This is a old test", t2)
fstest.CheckItems(t, r.Fremote, file2)
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred exactly one file, but kept the
// normalized state of the file.
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
fstest.CheckItems(t, r.Flocal, file1)
file1.Path = file2.Path
fstest.CheckItems(t, r.Fremote, file1)
@@ -1037,8 +1054,8 @@ func TestSyncImmutable(t *testing.T) {
fstest.CheckItems(t, r.Fremote)
// Should succeed
fs.Stats.ResetCounters()
err := fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file1)
@@ -1049,8 +1066,8 @@ func TestSyncImmutable(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)
// Should fail with ErrorImmutableModified and not modify local or remote files
fs.Stats.ResetCounters()
err = fs.Sync(r.Fremote, r.Flocal)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal)
assert.EqualError(t, err, fs.ErrorImmutableModified.Error())
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file1)
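Taken together, these test hunks show the mechanical change the split imposes on callers: stats moved to fs/accounting, the sync entry points (Sync, CopyDir, MoveDir) are now package-local, and Mkdir moved to fs/operations. A minimal sketch of a caller driving a sync under the new layout; the import paths below (fs/sync, fs/accounting, and the fs/all registration package) are assumptions based on the paths visible in this commit:

package main

import (
	"log"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/accounting"
	_ "github.com/ncw/rclone/fs/all" // register backends (path is an assumption at this commit)
	"github.com/ncw/rclone/fs/sync"
)

func main() {
	fs.LoadConfig()
	fsrc, err := fs.NewFs("/tmp/src")
	if err != nil {
		log.Fatal(err)
	}
	fdst, err := fs.NewFs("/tmp/dst")
	if err != nil {
		log.Fatal(err)
	}
	accounting.Stats.ResetCounters()
	if err := sync.Sync(fdst, fsrc); err != nil { // destination first, source second
		log.Fatal(err)
	}
	log.Printf("transferred %d files", accounting.Stats.GetTransfers())
}

CopyDir and MoveDir follow the same shape; as the server side move tests above show, MoveDir additionally takes a deleteEmptySrcDirs bool.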

View File

@@ -1,415 +0,0 @@
// +build ignore
// Run tests for all the remotes
//
// Run with go run test_all.go
package main
import (
"flag"
"log"
"os"
"os/exec"
"regexp"
"runtime"
"strings"
"time"
"github.com/ncw/rclone/fs"
_ "github.com/ncw/rclone/fs/all" // import all fs
"github.com/ncw/rclone/fstest"
)
type remoteConfig struct {
Name string
SubDir bool
FastList bool
}
var (
remotes = []remoteConfig{
{
Name: "TestAmazonCloudDrive:",
SubDir: false,
FastList: false,
},
{
Name: "TestB2:",
SubDir: true,
FastList: true,
},
{
Name: "TestCryptDrive:",
SubDir: false,
FastList: false,
},
{
Name: "TestCryptSwift:",
SubDir: false,
FastList: false,
},
{
Name: "TestDrive:",
SubDir: false,
FastList: false,
},
{
Name: "TestDropbox:",
SubDir: false,
FastList: false,
},
{
Name: "TestGoogleCloudStorage:",
SubDir: true,
FastList: true,
},
{
Name: "TestHubic:",
SubDir: false,
FastList: false,
},
{
Name: "TestOneDrive:",
SubDir: false,
FastList: false,
},
{
Name: "TestS3:",
SubDir: true,
FastList: true,
},
{
Name: "TestSftp:",
SubDir: false,
FastList: false,
},
{
Name: "TestSwift:",
SubDir: true,
FastList: true,
},
{
Name: "TestYandex:",
SubDir: false,
FastList: false,
},
{
Name: "TestFTP:",
SubDir: false,
FastList: false,
},
{
Name: "TestBox:",
SubDir: false,
FastList: false,
},
{
Name: "TestQingStor:",
SubDir: false,
FastList: false,
},
{
Name: "TestAzureBlob:",
SubDir: true,
FastList: true,
},
{
Name: "TestPcloud:",
SubDir: false,
FastList: false,
},
{
Name: "TestWebdav:",
SubDir: false,
FastList: false,
},
{
Name: "TestCache:",
SubDir: false,
FastList: false,
},
}
binary = "fs.test"
// Flags
maxTries = flag.Int("maxtries", 5, "Number of times to try each test")
runTests = flag.String("remotes", "", "Comma separated list of remotes to test, eg 'TestSwift:,TestS3'")
clean = flag.Bool("clean", false, "Instead of testing, clean all left over test directories")
runOnly = flag.String("run-only", "", "Run only those tests matching the regexp supplied")
timeout = flag.Duration("timeout", 30*time.Minute, "Maximum time to run each test for before giving up")
)
// test holds info about a running test
type test struct {
remote string
subdir bool
cmdLine []string
cmdString string
try int
err error
output []byte
failedTests []string
runFlag string
}
// newTest creates a new test
func newTest(remote string, subdir bool, fastlist bool) *test {
t := &test{
remote: remote,
subdir: subdir,
cmdLine: []string{"./" + binary, "-test.timeout", (*timeout).String(), "-remote", remote},
try: 1,
}
if *fstest.Verbose {
t.cmdLine = append(t.cmdLine, "-test.v")
fs.Config.LogLevel = fs.LogLevelDebug
}
if *runOnly != "" {
t.cmdLine = append(t.cmdLine, "-test.run", *runOnly)
}
if subdir {
t.cmdLine = append(t.cmdLine, "-subdir")
}
if fastlist {
t.cmdLine = append(t.cmdLine, "-fast-list")
}
t.cmdString = toShell(t.cmdLine)
return t
}
// dumpOutput prints the error output
func (t *test) dumpOutput() {
log.Println("------------------------------------------------------------")
log.Printf("---- %q ----", t.cmdString)
log.Println(string(t.output))
log.Println("------------------------------------------------------------")
}
var failRe = regexp.MustCompile(`(?m)^--- FAIL: (Test\w*) \(`)
// findFailures looks for all the tests which failed
func (t *test) findFailures() {
oldFailedTests := t.failedTests
t.failedTests = nil
for _, matches := range failRe.FindAllSubmatch(t.output, -1) {
t.failedTests = append(t.failedTests, string(matches[1]))
}
if len(t.failedTests) != 0 {
t.runFlag = "^(" + strings.Join(t.failedTests, "|") + ")$"
} else {
t.runFlag = ""
}
if t.passed() && len(t.failedTests) != 0 {
log.Printf("%q - Expecting no errors but got: %v", t.cmdString, t.failedTests)
t.dumpOutput()
} else if !t.passed() && len(t.failedTests) == 0 {
log.Printf("%q - Expecting errors but got none: %v", t.cmdString, t.failedTests)
t.dumpOutput()
t.failedTests = oldFailedTests
}
}
// nextCmdLine returns the next command line
func (t *test) nextCmdLine() []string {
cmdLine := t.cmdLine[:]
if t.runFlag != "" {
cmdLine = append(cmdLine, "-test.run", t.runFlag)
}
return cmdLine
}
// if it matches then it is definitely OK in the shell
var shellOK = regexp.MustCompile("^[A-Za-z0-9./_:-]+$")
// converts an argv style input into a shell command
func toShell(args []string) (result string) {
for _, arg := range args {
if result != "" {
result += " "
}
if shellOK.MatchString(arg) {
result += arg
} else {
result += "'" + arg + "'"
}
}
return result
}
// trial runs a single test
func (t *test) trial() {
cmdLine := t.nextCmdLine()
cmdString := toShell(cmdLine)
log.Printf("%q - Starting (try %d/%d)", cmdString, t.try, *maxTries)
cmd := exec.Command(cmdLine[0], cmdLine[1:]...)
start := time.Now()
t.output, t.err = cmd.CombinedOutput()
duration := time.Since(start)
t.findFailures()
if t.passed() {
log.Printf("%q - Finished OK in %v (try %d/%d)", cmdString, duration, t.try, *maxTries)
} else {
log.Printf("%q - Finished ERROR in %v (try %d/%d): %v: Failed %v", cmdString, duration, t.try, *maxTries, t.err, t.failedTests)
}
}
// cleanFs cleans a single fs, purging leftover test directories
func (t *test) cleanFs() error {
f, err := fs.NewFs(t.remote)
if err != nil {
return err
}
entries, err := fs.ListDirSorted(f, true, "")
if err != nil {
return err
}
return entries.ForDirError(func(dir fs.Directory) error {
remote := dir.Remote()
if fstest.MatchTestRemote.MatchString(remote) {
log.Printf("Purging %s%s", t.remote, remote)
dir, err := fs.NewFs(t.remote + remote)
if err != nil {
return err
}
return fs.Purge(dir)
}
return nil
})
}
// clean runs a single clean on an fs for leftover directories
func (t *test) clean() {
log.Printf("%q - Starting clean (try %d/%d)", t.remote, t.try, *maxTries)
start := time.Now()
t.err = t.cleanFs()
if t.err != nil {
log.Printf("%q - Failed to purge %v", t.remote, t.err)
}
duration := time.Since(start)
if t.passed() {
log.Printf("%q - Finished OK in %v (try %d/%d)", t.cmdString, duration, t.try, *maxTries)
} else {
log.Printf("%q - Finished ERROR in %v (try %d/%d): %v", t.cmdString, duration, t.try, *maxTries, t.err)
}
}
// passed returns true if the test passed
func (t *test) passed() bool {
return t.err == nil
}
// run runs all the trials for this test
func (t *test) run(result chan<- *test) {
for t.try = 1; t.try <= *maxTries; t.try++ {
if *clean {
if !t.subdir {
t.clean()
}
} else {
t.trial()
}
if t.passed() {
break
}
}
if !t.passed() {
t.dumpOutput()
}
result <- t
}
// makeTestBinary makes the binary we will run
func makeTestBinary() {
if runtime.GOOS == "windows" {
binary += ".exe"
}
log.Printf("Making test binary %q", binary)
err := exec.Command("go", "test", "-c", "-o", binary).Run()
if err != nil {
log.Fatalf("Failed to make test binary: %v", err)
}
if _, err := os.Stat(binary); err != nil {
log.Fatalf("Couldn't find test binary %q", binary)
}
}
// removeTestBinary removes the binary made in makeTestBinary
func removeTestBinary() {
err := os.Remove(binary) // Delete the binary when finished
if err != nil {
log.Printf("Error removing test binary %q: %v", binary, err)
}
}
func main() {
flag.Parse()
if *runTests != "" {
newRemotes := []remoteConfig{}
for _, name := range strings.Split(*runTests, ",") {
for i := range remotes {
if remotes[i].Name == name {
newRemotes = append(newRemotes, remotes[i])
goto found
}
}
log.Printf("Remote %q not found - inserting with default flags", name)
newRemotes = append(newRemotes, remoteConfig{Name: name})
found:
}
remotes = newRemotes
}
var names []string
for _, remote := range remotes {
names = append(names, remote.Name)
}
log.Printf("Testing remotes: %s", strings.Join(names, ", "))
start := time.Now()
if *clean {
fs.LoadConfig()
} else {
makeTestBinary()
defer removeTestBinary()
}
// start the tests
results := make(chan *test, 8)
awaiting := 0
bools := []bool{false, true}
if *clean {
// Don't run -subdir and -fast-list if -clean
bools = bools[:1]
}
for _, remote := range remotes {
for _, subdir := range bools {
for _, fastlist := range bools {
if (!subdir || subdir && remote.SubDir) && (!fastlist || fastlist && remote.FastList) {
go newTest(remote.Name, subdir, fastlist).run(results)
awaiting++
}
}
}
}
// Wait for the tests to finish
var failed []*test
for ; awaiting > 0; awaiting-- {
t := <-results
if !t.passed() {
failed = append(failed, t)
}
}
duration := time.Since(start)
// Summarise results
log.Printf("SUMMARY")
if len(failed) == 0 {
log.Printf("PASS: All tests finished OK in %v", duration)
} else {
log.Printf("FAIL: %d tests failed in %v", len(failed), duration)
for _, t := range failed {
log.Printf(" * %s", toShell(t.nextCmdLine()))
log.Printf(" * Failed tests: %v", t.failedTests)
}
os.Exit(1)
}
}
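The heart of the deleted driver is the retry logic above: findFailures scrapes the "--- FAIL:" lines out of the go test output, and nextCmdLine reruns only those tests on the next try via -test.run. A self-contained sketch of that pattern:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// failRe matches the test names on go test's "--- FAIL:" lines,
// as in the deleted test_all.go.
var failRe = regexp.MustCompile(`(?m)^--- FAIL: (Test\w*) \(`)

// runFlag turns a failed run's output into a -test.run expression
// that reruns only the failing tests on the next try.
func runFlag(output []byte) string {
	var failed []string
	for _, m := range failRe.FindAllSubmatch(output, -1) {
		failed = append(failed, string(m[1]))
	}
	if len(failed) == 0 {
		return ""
	}
	return "^(" + strings.Join(failed, "|") + ")$"
}

func main() {
	out := []byte("--- FAIL: TestSyncWithExclude (0.01s)\n--- FAIL: TestCopyDeleteBefore (0.02s)\n")
	fmt.Println(runFlag(out)) // ^(TestSyncWithExclude|TestCopyDeleteBefore)$
}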

View File

@@ -1,6 +1,5 @@
// Walking directories
package fs
// Package walk walks directories
package walk
import (
"bytes"
@@ -11,6 +10,9 @@ import (
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/list"
"github.com/pkg/errors"
)
@@ -23,7 +25,7 @@ var ErrorSkipDir = errors.New("skip this directory")
// capable of doing a recursive listing.
var ErrorCantListR = errors.New("recursive directory listing not available")
// WalkFunc is the type of the function called for each directory
// Func is the type of the function called for each directory
// visited by Walk. The path argument contains the remote path to the directory.
//
// If there was a problem walking to the directory named by path, the
@@ -33,7 +35,7 @@ var ErrorCantListR = errors.New("recursive directory listing not available")
// sole exception is when the function returns the special value
// ErrorSkipDir. If the function returns ErrorSkipDir, Walk skips the
// directory's contents entirely.
type WalkFunc func(path string, entries DirEntries, err error) error
type Func func(path string, entries fs.DirEntries, err error) error
// Walk lists the directory.
//
@@ -53,25 +55,25 @@ type WalkFunc func(path string, entries DirEntries, err error) error
// and f supports it and level > 1, or WalkN otherwise.
//
// NB (f, path) to be replaced by fs.Dir at some point
func Walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc) error {
if (maxLevel < 0 || maxLevel > 1) && Config.UseListR && f.Features().ListR != nil {
return WalkR(f, path, includeAll, maxLevel, fn)
func Walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
if (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && f.Features().ListR != nil {
return walkListR(f, path, includeAll, maxLevel, fn)
}
return WalkN(f, path, includeAll, maxLevel, fn)
return walkListDirSorted(f, path, includeAll, maxLevel, fn)
}
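Walk keeps its exported name while WalkR and WalkN become the internal walkListR and walkListDirSorted, so external callers now go through this one entry point. A minimal sketch of a caller, assuming f is an initialised fs.Fs and that the package lives at fs/walk:

package example

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/walk"
)

// listEverything visits every directory under f with no depth limit,
// letting Walk pick ListR or level-by-level listing as available.
func listEverything(f fs.Fs) error {
	return walk.Walk(f, "", false, -1, func(path string, entries fs.DirEntries, err error) error {
		if err != nil {
			return err // returning walk.ErrorSkipDir instead would prune a directory
		}
		for _, entry := range entries {
			fs.Infof(nil, "visited %q", entry.Remote())
		}
		return nil
	})
}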
// WalkN lists the directory.
// walkListDirSorted lists the directory.
//
// It implements Walk using non recursive directory listing.
func WalkN(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc) error {
return walk(f, path, includeAll, maxLevel, fn, ListDirSorted)
func walkListDirSorted(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
return walk(f, path, includeAll, maxLevel, fn, list.DirSorted)
}
// WalkR lists the directory.
// walkListR lists the directory.
//
// It implements Walk using recursive directory listing if
// available, or returns ErrorCantListR if not.
func WalkR(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc) error {
func walkListR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
listR := f.Features().ListR
if listR == nil {
return ErrorCantListR
@@ -79,9 +81,9 @@ func WalkR(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc) error
return walkR(f, path, includeAll, maxLevel, fn, listR)
}
type listDirFunc func(fs Fs, includeAll bool, dir string) (entries DirEntries, err error)
type listDirFunc func(fs fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error)
func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir listDirFunc) error {
func walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listDir listDirFunc) error {
var (
wg sync.WaitGroup // sync closing of go routines
traversing sync.WaitGroup // running directory traversals
@@ -94,7 +96,7 @@ func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir
depth int
}
in := make(chan listJob, Config.Checkers)
in := make(chan listJob, fs.Config.Checkers)
errs := make(chan error, 1)
quit := make(chan struct{})
closeQuit := func() {
@@ -107,7 +109,7 @@ func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir
}()
})
}
for i := 0; i < Config.Checkers; i++ {
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
@@ -120,7 +122,7 @@ func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir
entries, err := listDir(f, includeAll, job.remote)
var jobs []listJob
if err == nil && job.depth != 0 {
entries.ForDir(func(dir Directory) {
entries.ForDir(func(dir fs.Directory) {
// Recurse for the directory
jobs = append(jobs, listJob{
remote: dir.Remote(),
@@ -134,8 +136,8 @@ func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir
// NB once we have passed entries to fn we mustn't touch it again
if err != nil && err != ErrorSkipDir {
traversing.Done()
Stats.Error(err)
Errorf(job.remote, "error listing: %v", err)
fs.CountError(err)
fs.Errorf(job.remote, "error listing: %v", err)
closeQuit()
// Send error to error channel if space
select {
@@ -176,7 +178,7 @@ func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir
}
// DirTree is a map of directories to entries
type DirTree map[string]DirEntries
type DirTree map[string]fs.DirEntries
// parentDir finds the parent directory of path
func parentDir(entryPath string) string {
@@ -188,13 +190,13 @@ func parentDir(entryPath string) string {
}
// add an entry to the tree
func (dt DirTree) add(entry DirEntry) {
func (dt DirTree) add(entry fs.DirEntry) {
dirPath := parentDir(entry.Remote())
dt[dirPath] = append(dt[dirPath], entry)
}
// add a directory entry to the tree
func (dt DirTree) addDir(entry DirEntry) {
func (dt DirTree) addDir(entry fs.DirEntry) {
dt.add(entry)
// create the directory itself if it doesn't exist already
dirPath := entry.Remote()
@@ -204,7 +206,7 @@ func (dt DirTree) addDir(entry DirEntry) {
}
// Find returns the DirEntry for filePath or nil if not found
func (dt DirTree) Find(filePath string) (parentPath string, entry DirEntry) {
func (dt DirTree) Find(filePath string) (parentPath string, entry fs.DirEntry) {
parentPath = parentDir(filePath)
for _, entry := range dt[parentPath] {
if entry.Remote() == filePath {
@@ -223,7 +225,7 @@ func (dt DirTree) checkParent(root, dirPath string) {
if entry != nil {
return
}
dt[parentPath] = append(dt[parentPath], NewDir(dirPath, time.Now()))
dt[parentPath] = append(dt[parentPath], fs.NewDir(dirPath, time.Now()))
dt.checkParent(root, parentPath)
}
@@ -264,7 +266,7 @@ func (dt DirTree) Prune(dirNames map[string]bool) error {
// true, therefore this should not
// happen. But this makes the function
// more predictable.
Infof(dName, "Directory in the map for prune, but the value is false")
fs.Infof(dName, "Directory in the map for prune, but the value is false")
continue
}
if dName == "" {
@@ -277,7 +279,7 @@ func (dt DirTree) Prune(dirNames map[string]bool) error {
// such case the loop will be skipped.
for i, entry := range dt[parent] {
switch x := entry.(type) {
case Directory:
case fs.Directory:
if x.Remote() == dName {
// the slice is not sorted yet
// to delete item
@@ -289,7 +291,7 @@ func (dt DirTree) Prune(dirNames map[string]bool) error {
// iterating immediately
break
}
case Object:
case fs.Object:
// do nothing
default:
return errors.Errorf("unknown object type %T", entry)
@@ -303,7 +305,7 @@ func (dt DirTree) Prune(dirNames map[string]bool) error {
// during range iteration, they may be skipped.
for dName, remove := range dirNames {
if !remove {
Infof(dName, "Directory in the map for prune, but the value is false")
fs.Infof(dName, "Directory in the map for prune, but the value is false")
continue
}
// First, add all subdirectories to dirNames.
@@ -312,10 +314,10 @@ func (dt DirTree) Prune(dirNames map[string]bool) error {
// If so, the loop will be skipped.
for _, entry := range dt[dName] {
switch x := entry.(type) {
case Directory:
case fs.Directory:
excludeDir := x.Remote()
dirNames[excludeDir] = true
case Object:
case fs.Object:
// do nothing
default:
return errors.Errorf("unknown object type %T", entry)
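The repeated scans and "may be skipped" comments in Prune rest on a Go language rule: a map entry created during a range may be produced by that iteration or skipped, so one pass over dirNames cannot be trusted to see the keys it added itself. A tiny standalone demonstration:

package main

import "fmt"

func main() {
	m := map[string]bool{"a": true, "b": true}
	seen := 0
	for range m {
		seen++
		m["added"] = true // may or may not be visited by this same range
	}
	fmt.Println(seen) // prints 2 or 3 depending on the run
}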
@@ -338,7 +340,7 @@ func (dt DirTree) String() string {
fmt.Fprintf(out, "%s/\n", dir)
for _, entry := range dt[dir] {
flag := ""
if _, ok := entry.(Directory); ok {
if _, ok := entry.(fs.Directory); ok {
flag = "/"
}
fmt.Fprintf(out, " %s%s\n", path.Base(entry.Remote()), flag)
@@ -347,22 +349,22 @@ func (dt DirTree) String() string {
return out.String()
}
func walkRDirTree(f Fs, startPath string, includeAll bool, maxLevel int, listR ListRFn) (DirTree, error) {
func walkRDirTree(f fs.Fs, startPath string, includeAll bool, maxLevel int, listR fs.ListRFn) (DirTree, error) {
dirs := make(DirTree)
// Entries can come in arbitrary order. We use toPrune to keep
// all directories to exclude later.
toPrune := make(map[string]bool)
includeDirectory := Config.Filter.IncludeDirectory(f)
includeDirectory := filter.Active.IncludeDirectory(f)
var mu sync.Mutex
err := listR(startPath, func(entries DirEntries) error {
err := listR(startPath, func(entries fs.DirEntries) error {
mu.Lock()
defer mu.Unlock()
for _, entry := range entries {
slashes := strings.Count(entry.Remote(), "/")
switch x := entry.(type) {
case Object:
case fs.Object:
// Make sure we don't delete excluded files if not required
if includeAll || Config.Filter.IncludeObject(x) {
if includeAll || filter.Active.IncludeObject(x) {
if maxLevel < 0 || slashes <= maxLevel-1 {
dirs.add(x)
} else {
@@ -374,18 +376,18 @@ func walkRDirTree(f Fs, startPath string, includeAll bool, maxLevel int, listR L
dirs.checkParent(startPath, dirPath)
}
} else {
Debugf(x, "Excluded from sync (and deletion)")
fs.Debugf(x, "Excluded from sync (and deletion)")
}
// Check if we need to prune a directory later.
if !includeAll && len(Config.Filter.ExcludeFile) > 0 {
if !includeAll && len(filter.Active.Opt.ExcludeFile) > 0 {
basename := path.Base(x.Remote())
if basename == Config.Filter.ExcludeFile {
if basename == filter.Active.Opt.ExcludeFile {
excludeDir := parentDir(x.Remote())
toPrune[excludeDir] = true
Debugf(basename, "Excluded from sync (and deletion) based on exclude file")
fs.Debugf(basename, "Excluded from sync (and deletion) based on exclude file")
}
}
case Directory:
case fs.Directory:
inc, err := includeDirectory(x.Remote())
if err != nil {
return err
@@ -400,7 +402,7 @@ func walkRDirTree(f Fs, startPath string, includeAll bool, maxLevel int, listR L
}
}
} else {
Debugf(x, "Excluded from sync (and deletion)")
fs.Debugf(x, "Excluded from sync (and deletion)")
}
default:
return errors.Errorf("unknown object type %T", entry)
@@ -424,9 +426,9 @@ func walkRDirTree(f Fs, startPath string, includeAll bool, maxLevel int, listR L
}
// Create a DirTree using List
func walkNDirTree(f Fs, path string, includeAll bool, maxLevel int, listDir listDirFunc) (DirTree, error) {
func walkNDirTree(f fs.Fs, path string, includeAll bool, maxLevel int, listDir listDirFunc) (DirTree, error) {
dirs := make(DirTree)
fn := func(dirPath string, entries DirEntries, err error) error {
fn := func(dirPath string, entries fs.DirEntries, err error) error {
if err == nil {
dirs[dirPath] = entries
}
@@ -451,21 +453,21 @@ func walkNDirTree(f Fs, path string, includeAll bool, maxLevel int, listDir list
// and f supports it and level > 1, or WalkN otherwise.
//
// NB (f, path) to be replaced by fs.Dir at some point
func NewDirTree(f Fs, path string, includeAll bool, maxLevel int) (DirTree, error) {
if ListR := f.Features().ListR; (maxLevel < 0 || maxLevel > 1) && Config.UseListR && ListR != nil {
func NewDirTree(f fs.Fs, path string, includeAll bool, maxLevel int) (DirTree, error) {
if ListR := f.Features().ListR; (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && ListR != nil {
return walkRDirTree(f, path, includeAll, maxLevel, ListR)
}
return walkNDirTree(f, path, includeAll, maxLevel, ListDirSorted)
return walkNDirTree(f, path, includeAll, maxLevel, list.DirSorted)
}
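NewDirTree builds the whole DirTree in one call, using ListR when permitted and falling back to sorted level-by-level listing otherwise, and String renders the result one directory per stanza. A short usage sketch, assuming f is an initialised fs.Fs and the fs/walk import path:

package example

import (
	"fmt"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/walk"
)

// dumpTree prints f's full tree in DirTree's indented format.
func dumpTree(f fs.Fs) error {
	tree, err := walk.NewDirTree(f, "", true, -1)
	if err != nil {
		return err
	}
	fmt.Print(tree.String())
	return nil
}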
func walkR(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listR ListRFn) error {
func walkR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR fs.ListRFn) error {
dirs, err := walkRDirTree(f, path, includeAll, maxLevel, listR)
if err != nil {
return err
}
skipping := false
skipPrefix := ""
emptyDir := DirEntries{}
emptyDir := fs.DirEntries{}
for _, dirPath := range dirs.Dirs() {
if skipping {
// Skip over directories as required
@@ -492,17 +494,17 @@ func walkR(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listR
return nil
}
// WalkGetAll runs Walk getting all the results
func WalkGetAll(f Fs, path string, includeAll bool, maxLevel int) (objs []Object, dirs []Directory, err error) {
err = Walk(f, path, includeAll, maxLevel, func(dirPath string, entries DirEntries, err error) error {
// GetAll runs Walk getting all the results
func GetAll(f fs.Fs, path string, includeAll bool, maxLevel int) (objs []fs.Object, dirs []fs.Directory, err error) {
err = Walk(f, path, includeAll, maxLevel, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
for _, entry := range entries {
switch x := entry.(type) {
case Object:
case fs.Object:
objs = append(objs, x)
case Directory:
case fs.Directory:
dirs = append(dirs, x)
}
}
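GetAll (formerly WalkGetAll) is the convenience wrapper that flattens a walk into object and directory slices. A sketch of the renamed call, assuming f is an initialised fs.Fs:

package example

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/walk"
)

// countEntries returns how many objects and directories live under f.
func countEntries(f fs.Fs) (objects, directories int, err error) {
	objs, dirs, err := walk.GetAll(f, "", false, -1)
	if err != nil {
		return 0, 0, err
	}
	return len(objs), len(dirs), nil
}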
@@ -513,12 +515,12 @@ func WalkGetAll(f Fs, path string, includeAll bool, maxLevel int) (objs []Object
// ListRHelper is used in the implementation of ListR to accumulate DirEntries
type ListRHelper struct {
callback ListRCallback
entries DirEntries
callback fs.ListRCallback
entries fs.DirEntries
}
// NewListRHelper should be called from ListR with the callback passed in
func NewListRHelper(callback ListRCallback) *ListRHelper {
func NewListRHelper(callback fs.ListRCallback) *ListRHelper {
return &ListRHelper{
callback: callback,
}
@@ -536,7 +538,7 @@ func (lh *ListRHelper) send(max int) (err error) {
// Add an entry to the stored entries and send them if there are more
// than a certain amount
func (lh *ListRHelper) Add(entry DirEntry) error {
func (lh *ListRHelper) Add(entry fs.DirEntry) error {
if entry == nil {
return nil
}
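ListRHelper is the batching shim a backend's ListR implementation uses: Add accumulates entries and sends them to the callback in chunks once enough pile up. A sketch of the intended call shape; the listAll helper is hypothetical, and the final Flush call is an assumption that the helper keeps its method for sending the last partial batch:

package example

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/walk"
)

// Fs is a stand-in for a backend that supports recursive listing.
type Fs struct{}

// listAll is a hypothetical internal lister calling fn for each entry;
// a real backend would page through the remote here.
func (f *Fs) listAll(dir string, fn func(fs.DirEntry) error) error {
	return nil
}

// ListR feeds entries to the callback in batches via walk.ListRHelper.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) error {
	helper := walk.NewListRHelper(callback)
	if err := f.listAll(dir, func(entry fs.DirEntry) error {
		return helper.Add(entry) // sends a chunk whenever enough entries accumulate
	}); err != nil {
		return err
	}
	return helper.Flush() // assumed: sends whatever is left below the chunk size
}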

View File

@@ -1,12 +1,14 @@
package fs
package walk
import (
"fmt"
"io"
"sync"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fstest/mockdir"
"github.com/ncw/rclone/fstest/mockobject"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -14,7 +16,7 @@ import (
type (
listResult struct {
entries DirEntries
entries fs.DirEntries
err error
}
@@ -25,7 +27,7 @@ type (
listDirs struct {
mu sync.Mutex
t *testing.T
fs Fs
fs fs.Fs
includeAll bool
results listResults
walkResults listResults
@@ -36,32 +38,7 @@ type (
}
)
var errNotImpl = errors.New("not implemented")
type mockObject string
func (o mockObject) String() string { return string(o) }
func (o mockObject) Fs() Info { return nil }
func (o mockObject) Remote() string { return string(o) }
func (o mockObject) Hash(HashType) (string, error) { return "", errNotImpl }
func (o mockObject) ModTime() (t time.Time) { return t }
func (o mockObject) Size() int64 { return 0 }
func (o mockObject) Storable() bool { return true }
func (o mockObject) SetModTime(time.Time) error { return errNotImpl }
func (o mockObject) Open(options ...OpenOption) (io.ReadCloser, error) { return nil, errNotImpl }
func (o mockObject) Update(in io.Reader, src ObjectInfo, options ...OpenOption) error {
return errNotImpl
}
func (o mockObject) Remove() error { return errNotImpl }
type unknownDirEntry string
func (o unknownDirEntry) String() string { return string(o) }
func (o unknownDirEntry) Remote() string { return string(o) }
func (o unknownDirEntry) ModTime() (t time.Time) { return t }
func (o unknownDirEntry) Size() int64 { return 0 }
func newListDirs(t *testing.T, f Fs, includeAll bool, results listResults, walkErrors errorMap, finalError error) *listDirs {
func newListDirs(t *testing.T, f fs.Fs, includeAll bool, results listResults, walkErrors errorMap, finalError error) *listDirs {
return &listDirs{
t: t,
fs: f,
@@ -88,7 +65,7 @@ func (ls *listDirs) SetLevel(maxLevel int) *listDirs {
}
// ListDir returns the expected listing for the directory
func (ls *listDirs) ListDir(f Fs, includeAll bool, dir string) (entries DirEntries, err error) {
func (ls *listDirs) ListDir(f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
ls.mu.Lock()
defer ls.mu.Unlock()
assert.Equal(ls.t, ls.fs, f)
@@ -109,7 +86,7 @@ func (ls *listDirs) ListDir(f Fs, includeAll bool, dir string) (entries DirEntri
}
// ListR returns the expected listing for the directory using ListR
func (ls *listDirs) ListR(dir string, callback ListRCallback) (err error) {
func (ls *listDirs) ListR(dir string, callback fs.ListRCallback) (err error) {
ls.mu.Lock()
defer ls.mu.Unlock()
@@ -140,7 +117,7 @@ func (ls *listDirs) IsFinished() {
}
// WalkFn is called by the walk to test the expectations
func (ls *listDirs) WalkFn(dir string, entries DirEntries, err error) error {
func (ls *listDirs) WalkFn(dir string, entries fs.DirEntries, err error) error {
ls.mu.Lock()
defer ls.mu.Unlock()
// ls.t.Logf("WalkFn(%q, %v, %q)", dir, entries, err)
@@ -184,14 +161,10 @@ func (ls *listDirs) WalkR() {
}
}
func newDir(name string) Directory {
return NewDir(name, time.Time{})
}
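With the local mockObject and newDir helpers gone, fixtures now come from the shared fstest/mockdir and fstest/mockobject packages, which keep the same shapes: a string-backed fs.Object stub and a directory entry with a zero ModTime. A minimal sketch of building the kind of listing these tests use:

package example

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/mockdir"
	"github.com/ncw/rclone/fstest/mockobject"
)

// fixtures builds a tiny listing: one object and one directory entry.
func fixtures() fs.DirEntries {
	oA := mockobject.Object("A") // fs.Object stub; data methods are unimplemented
	da := mockdir.New("a")       // fs.Directory with a zero ModTime
	return fs.DirEntries{oA, da}
}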
func testWalkEmpty(t *testing.T) *listDirs {
return newListDirs(t, nil, false,
listResults{
"": {entries: DirEntries{}, err: nil},
"": {entries: fs.DirEntries{}, err: nil},
},
errorMap{
"": nil,
@@ -205,7 +178,7 @@ func TestWalkREmpty(t *testing.T) { testWalkEmpty(t).WalkR() }
func testWalkEmptySkip(t *testing.T) *listDirs {
return newListDirs(t, nil, true,
listResults{
"": {entries: DirEntries{}, err: nil},
"": {entries: fs.DirEntries{}, err: nil},
},
errorMap{
"": ErrorSkipDir,
@@ -219,12 +192,12 @@ func TestWalkREmptySkip(t *testing.T) { testWalkEmptySkip(t).WalkR() }
func testWalkNotFound(t *testing.T) *listDirs {
return newListDirs(t, nil, true,
listResults{
"": {err: ErrorDirNotFound},
"": {err: fs.ErrorDirNotFound},
},
errorMap{
"": ErrorDirNotFound,
"": fs.ErrorDirNotFound,
},
ErrorDirNotFound,
fs.ErrorDirNotFound,
)
}
func TestWalkNotFound(t *testing.T) { testWalkNotFound(t).Walk() }
@@ -234,7 +207,7 @@ func TestWalkNotFoundMaskError(t *testing.T) {
// this doesn't work for WalkR
newListDirs(t, nil, true,
listResults{
"": {err: ErrorDirNotFound},
"": {err: fs.ErrorDirNotFound},
},
errorMap{
"": nil,
@@ -247,7 +220,7 @@ func TestWalkNotFoundSkipkError(t *testing.T) {
// this doesn't work for WalkR
newListDirs(t, nil, true,
listResults{
"": {err: ErrorDirNotFound},
"": {err: fs.ErrorDirNotFound},
},
errorMap{
"": ErrorSkipDir,
@@ -257,21 +230,21 @@ func TestWalkNotFoundSkipkError(t *testing.T) {
}
func testWalkLevels(t *testing.T, maxLevel int) *listDirs {
da := newDir("a")
oA := mockObject("A")
db := newDir("a/b")
oB := mockObject("a/B")
dc := newDir("a/b/c")
oC := mockObject("a/b/C")
dd := newDir("a/b/c/d")
oD := mockObject("a/b/c/D")
da := mockdir.New("a")
oA := mockobject.Object("A")
db := mockdir.New("a/b")
oB := mockobject.Object("a/B")
dc := mockdir.New("a/b/c")
oC := mockobject.Object("a/b/C")
dd := mockdir.New("a/b/c/d")
oD := mockobject.Object("a/b/c/D")
return newListDirs(t, nil, false,
listResults{
"": {entries: DirEntries{oA, da}, err: nil},
"a": {entries: DirEntries{oB, db}, err: nil},
"a/b": {entries: DirEntries{oC, dc}, err: nil},
"a/b/c": {entries: DirEntries{oD, dd}, err: nil},
"a/b/c/d": {entries: DirEntries{}, err: nil},
"": {entries: fs.DirEntries{oA, da}, err: nil},
"a": {entries: fs.DirEntries{oB, db}, err: nil},
"a/b": {entries: fs.DirEntries{oC, dc}, err: nil},
"a/b/c": {entries: fs.DirEntries{oD, dd}, err: nil},
"a/b/c/d": {entries: fs.DirEntries{}, err: nil},
},
errorMap{
"": nil,
@@ -309,11 +282,11 @@ a/b/c/d/
}
func testWalkLevelsNoRecursive(t *testing.T) *listDirs {
da := newDir("a")
oA := mockObject("A")
da := mockdir.New("a")
oA := mockobject.Object("A")
return newListDirs(t, nil, false,
listResults{
"": {entries: DirEntries{oA, da}, err: nil},
"": {entries: fs.DirEntries{oA, da}, err: nil},
},
errorMap{
"": nil,
@@ -325,14 +298,14 @@ func TestWalkLevelsNoRecursive(t *testing.T) { testWalkLevelsNoRecursive(t).Wal
func TestWalkRLevelsNoRecursive(t *testing.T) { testWalkLevelsNoRecursive(t).WalkR() }
func testWalkLevels2(t *testing.T) *listDirs {
da := newDir("a")
oA := mockObject("A")
db := newDir("a/b")
oB := mockObject("a/B")
da := mockdir.New("a")
oA := mockobject.Object("A")
db := mockdir.New("a/b")
oB := mockobject.Object("a/B")
return newListDirs(t, nil, false,
listResults{
"": {entries: DirEntries{oA, da}, err: nil},
"a": {entries: DirEntries{oB, db}, err: nil},
"": {entries: fs.DirEntries{oA, da}, err: nil},
"a": {entries: fs.DirEntries{oB, db}, err: nil},
},
errorMap{
"": nil,
@@ -345,14 +318,14 @@ func TestWalkLevels2(t *testing.T) { testWalkLevels2(t).Walk() }
func TestWalkRLevels2(t *testing.T) { testWalkLevels2(t).WalkR() }
func testWalkSkip(t *testing.T) *listDirs {
da := newDir("a")
db := newDir("a/b")
dc := newDir("a/b/c")
da := mockdir.New("a")
db := mockdir.New("a/b")
dc := mockdir.New("a/b/c")
return newListDirs(t, nil, false,
listResults{
"": {entries: DirEntries{da}, err: nil},
"a": {entries: DirEntries{db}, err: nil},
"a/b": {entries: DirEntries{dc}, err: nil},
"": {entries: fs.DirEntries{da}, err: nil},
"a": {entries: fs.DirEntries{db}, err: nil},
"a/b": {entries: fs.DirEntries{dc}, err: nil},
},
errorMap{
"": nil,
@@ -368,19 +341,19 @@ func TestWalkRSkip(t *testing.T) { testWalkSkip(t).WalkR() }
func testWalkErrors(t *testing.T) *listDirs {
lr := listResults{}
em := errorMap{}
de := make(DirEntries, 10)
de := make(fs.DirEntries, 10)
for i := range de {
path := string('0' + i)
de[i] = newDir(path)
lr[path] = listResult{entries: nil, err: ErrorDirNotFound}
em[path] = ErrorDirNotFound
de[i] = mockdir.New(path)
lr[path] = listResult{entries: nil, err: fs.ErrorDirNotFound}
em[path] = fs.ErrorDirNotFound
}
lr[""] = listResult{entries: de, err: nil}
em[""] = nil
return newListDirs(t, nil, true,
lr,
em,
ErrorDirNotFound,
fs.ErrorDirNotFound,
).NoCheckMaps()
}
func TestWalkErrors(t *testing.T) { testWalkErrors(t).Walk() }
@@ -393,14 +366,14 @@ func makeTree(level int, terminalErrors bool) (listResults, errorMap) {
em := errorMap{}
var fill func(path string, level int)
fill = func(path string, level int) {
de := DirEntries{}
de := fs.DirEntries{}
if level > 0 {
for _, a := range "0123456789" {
subPath := string(a)
if path != "" {
subPath = path + "/" + subPath
}
de = append(de, newDir(subPath))
de = append(de, mockdir.New(subPath))
fill(subPath, level-1)
}
}
@@ -437,8 +410,8 @@ func TestWalkMultiErrors(t *testing.T) { testWalkMultiErrors(t).Walk() }
func TestWalkRMultiErrors(t *testing.T) { testWalkMultiErrors(t).Walk() }
// a very simple ListR callback function
func makeListRCallback(entries DirEntries, err error) ListRFn {
return func(dir string, callback ListRCallback) error {
func makeListRCallback(entries fs.DirEntries, err error) fs.ListRFn {
return func(dir string, callback fs.ListRCallback) error {
if err == nil {
err = callback(entries)
}
@@ -448,22 +421,22 @@ func makeListRCallback(entries DirEntries, err error) ListRFn {
func TestWalkRDirTree(t *testing.T) {
for _, test := range []struct {
entries DirEntries
entries fs.DirEntries
want string
err error
root string
level int
}{
{DirEntries{}, "/\n", nil, "", -1},
{DirEntries{mockObject("a")}, `/
{fs.DirEntries{}, "/\n", nil, "", -1},
{fs.DirEntries{mockobject.Object("a")}, `/
a
`, nil, "", -1},
{DirEntries{mockObject("a/b")}, `/
{fs.DirEntries{mockobject.Object("a/b")}, `/
a/
a/
b
`, nil, "", -1},
{DirEntries{mockObject("a/b/c/d")}, `/
{fs.DirEntries{mockobject.Object("a/b/c/d")}, `/
a/
a/
b/
@@ -472,17 +445,17 @@ a/b/
a/b/c/
d
`, nil, "", -1},
{DirEntries{mockObject("a")}, "", errorBoom, "", -1},
{DirEntries{
mockObject("0/1/2/3"),
mockObject("4/5/6/7"),
mockObject("8/9/a/b"),
mockObject("c/d/e/f"),
mockObject("g/h/i/j"),
mockObject("k/l/m/n"),
mockObject("o/p/q/r"),
mockObject("s/t/u/v"),
mockObject("w/x/y/z"),
{fs.DirEntries{mockobject.Object("a")}, "", errorBoom, "", -1},
{fs.DirEntries{
mockobject.Object("0/1/2/3"),
mockobject.Object("4/5/6/7"),
mockobject.Object("8/9/a/b"),
mockobject.Object("c/d/e/f"),
mockobject.Object("g/h/i/j"),
mockobject.Object("k/l/m/n"),
mockobject.Object("o/p/q/r"),
mockobject.Object("s/t/u/v"),
mockobject.Object("w/x/y/z"),
}, `/
0/
4/
@@ -548,10 +521,10 @@ w/x/
w/x/y/
z
`, nil, "", -1},
{DirEntries{
mockObject("a/b/c/d/e/f1"),
mockObject("a/b/c/d/e/f2"),
mockObject("a/b/c/d/e/f3"),
{fs.DirEntries{
mockobject.Object("a/b/c/d/e/f1"),
mockobject.Object("a/b/c/d/e/f2"),
mockobject.Object("a/b/c/d/e/f3"),
}, `a/b/c/
d/
a/b/c/d/
@@ -561,12 +534,12 @@ a/b/c/d/e/
f2
f3
`, nil, "a/b/c", -1},
{DirEntries{
mockObject("A"),
mockObject("a/B"),
mockObject("a/b/C"),
mockObject("a/b/c/D"),
mockObject("a/b/c/d/E"),
{fs.DirEntries{
mockobject.Object("A"),
mockobject.Object("a/B"),
mockobject.Object("a/b/C"),
mockobject.Object("a/b/c/D"),
mockobject.Object("a/b/c/d/E"),
}, `/
A
a/
@@ -574,9 +547,9 @@ a/
B
b/
`, nil, "", 2},
{DirEntries{
mockObject("a/b/c"),
mockObject("a/b/c/d/e"),
{fs.DirEntries{
mockobject.Object("a/b/c"),
mockobject.Object("a/b/c/d/e"),
}, `/
a/
a/
@@ -591,7 +564,7 @@ a/
func TestWalkRDirTreeExclude(t *testing.T) {
for _, test := range []struct {
entries DirEntries
entries fs.DirEntries
want string
err error
root string
@@ -599,21 +572,21 @@ func TestWalkRDirTreeExclude(t *testing.T) {
excludeFile string
includeAll bool
}{
{DirEntries{mockObject("a"), mockObject("ignore")}, "", nil, "", -1, "ignore", false},
{DirEntries{mockObject("a")}, `/
{fs.DirEntries{mockobject.Object("a"), mockobject.Object("ignore")}, "", nil, "", -1, "ignore", false},
{fs.DirEntries{mockobject.Object("a")}, `/
a
`, nil, "", -1, "ignore", false},
{DirEntries{
mockObject("a"),
mockObject("b/b"),
mockObject("b/.ignore"),
{fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b/b"),
mockobject.Object("b/.ignore"),
}, `/
a
`, nil, "", -1, ".ignore", false},
{DirEntries{
mockObject("a"),
mockObject("b/.ignore"),
mockObject("b/b"),
{fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b/.ignore"),
mockobject.Object("b/b"),
}, `/
a
b/
@@ -621,24 +594,24 @@ b/
.ignore
b
`, nil, "", -1, ".ignore", true},
{DirEntries{
mockObject("a"),
mockObject("b/b"),
mockObject("b/c/d/e"),
mockObject("b/c/ign"),
mockObject("b/c/x"),
{fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b/b"),
mockobject.Object("b/c/d/e"),
mockobject.Object("b/c/ign"),
mockobject.Object("b/c/x"),
}, `/
a
b/
b/
b
`, nil, "", -1, "ign", false},
{DirEntries{
mockObject("a"),
mockObject("b/b"),
mockObject("b/c/d/e"),
mockObject("b/c/ign"),
mockObject("b/c/x"),
{fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b/b"),
mockobject.Object("b/c/d/e"),
mockobject.Object("b/c/ign"),
mockobject.Object("b/c/x"),
}, `/
a
b/
@@ -653,11 +626,11 @@ b/c/d/
e
`, nil, "", -1, "ign", true},
} {
Config.Filter.ExcludeFile = test.excludeFile
filter.Active.Opt.ExcludeFile = test.excludeFile
r, err := walkRDirTree(nil, test.root, test.includeAll, test.level, makeListRCallback(test.entries, test.err))
assert.Equal(t, test.err, err, fmt.Sprintf("%+v", test))
assert.Equal(t, test.want, r.String(), fmt.Sprintf("%+v", test))
}
// Set to default value, to avoid side effects
Config.Filter.ExcludeFile = ""
filter.Active.Opt.ExcludeFile = ""
}