1
0
mirror of https://github.com/rclone/rclone.git synced 2026-01-05 18:13:17 +00:00

Break the fs package up into smaller parts.

The purpose of this is to make it easier to maintain and eventually to
allow the rclone backends to be re-used in other projects without
having to use the rclone configuration system.

The new code layout is documented in CONTRIBUTING.
This commit is contained in:
Nick Craig-Wood
2018-01-12 16:30:54 +00:00
parent 92624bbbf1
commit 11da2a6c9b
183 changed files with 5749 additions and 5063 deletions

View File

@@ -22,6 +22,10 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/unicode/norm"
@@ -54,8 +58,8 @@ func Initialise() {
// Never ask for passwords, fail instead.
// If your local config is encrypted set environment variable
// "RCLONE_CONFIG_PASS=hunter2" (or your password)
*fs.AskPassword = false
fs.LoadConfig()
fs.Config.AskPassword = false
config.LoadConfig()
if *Verbose {
fs.Config.LogLevel = fs.LogLevelDebug
}
@@ -72,7 +76,7 @@ func Initialise() {
// Item represents an item for checking
type Item struct {
Path string
Hashes map[fs.HashType]string
Hashes map[hash.Type]string
ModTime time.Time
Size int64
WinPath string
@@ -85,7 +89,7 @@ func NewItem(Path, Content string, modTime time.Time) Item {
ModTime: modTime,
Size: int64(len(Content)),
}
hash := fs.NewMultiHasher()
hash := hash.NewMultiHasher()
buf := bytes.NewBufferString(Content)
_, err := io.Copy(hash, buf)
if err != nil {
@@ -115,11 +119,11 @@ func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, prec
func (i *Item) CheckHashes(t *testing.T, obj fs.Object) {
require.NotNil(t, obj)
types := obj.Fs().Hashes().Array()
for _, hash := range types {
for _, Hash := range types {
// Check attributes
sum, err := obj.Hash(hash)
sum, err := obj.Hash(Hash)
require.NoError(t, err)
assert.True(t, fs.HashEquals(i.Hashes[hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), hash, i.Hashes[hash], sum))
assert.True(t, hash.Equals(i.Hashes[Hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), Hash, i.Hashes[Hash], sum))
}
}
@@ -252,7 +256,7 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs
expectedDirs = filterEmptyDirs(t, items, expectedDirs)
}
is := NewItems(items)
oldErrors := fs.Stats.GetErrors()
oldErrors := accounting.Stats.GetErrors()
var objs []fs.Object
var dirs []fs.Directory
var err error
@@ -262,7 +266,7 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs
gotListing := "<unset>"
listingOK := false
for i := 1; i <= retries; i++ {
objs, dirs, err = fs.WalkGetAll(f, "", true, -1)
objs, dirs, err = walk.GetAll(f, "", true, -1)
if err != nil && err != fs.ErrorDirNotFound {
t.Fatalf("Error listing: %v", err)
}
@@ -294,8 +298,8 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs
}
is.Done(t)
// Don't notice an error when listing an empty directory
if len(items) == 0 && oldErrors == 0 && fs.Stats.GetErrors() == 1 {
fs.Stats.ResetErrors()
if len(items) == 0 && oldErrors == 0 && accounting.Stats.GetErrors() == 1 {
accounting.Stats.ResetErrors()
}
// Check the directories
if expectedDirs != nil {
@@ -418,9 +422,9 @@ func RandomRemote(remoteName string, subdir bool) (fs.Fs, string, func(), error)
}
finalise := func() {
_ = fs.Purge(remote) // ignore error
Purge(remote)
if parentRemote != nil {
err = fs.Purge(parentRemote) // ignore error
Purge(parentRemote)
if err != nil {
log.Printf("Failed to purge %v: %v", parentRemote, err)
}
@@ -430,22 +434,48 @@ func RandomRemote(remoteName string, subdir bool) (fs.Fs, string, func(), error)
return remote, remoteName, finalise, nil
}
// TestMkdir tests Mkdir works
func TestMkdir(t *testing.T, remote fs.Fs) {
err := fs.Mkdir(remote, "")
require.NoError(t, err)
CheckListing(t, remote, []Item{})
}
// TestPurge tests Purge works
func TestPurge(t *testing.T, remote fs.Fs) {
err := fs.Purge(remote)
require.NoError(t, err)
CheckListing(t, remote, []Item{})
}
// TestRmdir tests Rmdir works
func TestRmdir(t *testing.T, remote fs.Fs) {
err := fs.Rmdir(remote, "")
require.NoError(t, err)
// Purge is a simplified re-implementation of operations.Purge for the
// test routine cleanup to avoid circular dependencies.
//
// It logs errors rather than returning them
func Purge(f fs.Fs) {
	var err error
	doFallbackPurge := true
	// Try the backend's optional Purge feature first.
	if doPurge := f.Features().Purge; doPurge != nil {
		doFallbackPurge = false
		err = doPurge()
		if err == fs.ErrorCantPurge {
			// Backend can't purge this remote - fall back to removing
			// objects and directories one by one.
			doFallbackPurge = true
		}
	}
	if doFallbackPurge {
		// Walk the whole remote, removing every object as it is seen
		// and recording every directory for removal afterwards.
		var dirs []string
		err = walk.Walk(f, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
			if err != nil {
				// Log and keep walking rather than aborting the cleanup.
				log.Printf("purge walk returned error: %v", err)
				return nil
			}
			entries.ForObject(func(obj fs.Object) {
				err = obj.Remove()
				if err != nil {
					log.Printf("purge failed to remove %q: %v", obj.Remote(), err)
				}
			})
			entries.ForDir(func(dir fs.Directory) {
				dirs = append(dirs, dir.Remote())
			})
			return nil
		})
		// Sort then iterate in reverse so children are removed before
		// their parent directories.
		sort.Strings(dirs)
		for i := len(dirs) - 1; i >= 0; i-- {
			dir := dirs[i]
			err := f.Rmdir(dir)
			if err != nil {
				log.Printf("purge failed to rmdir %q: %v", dir, err)
			}
		}
	}
	// Report the last error recorded from Purge or the walk (Rmdir
	// errors above are logged individually and deliberately shadowed).
	if err != nil {
		log.Printf("purge failed: %v", err)
	}
}

View File

@@ -19,6 +19,12 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/object"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/fstest"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
@@ -84,7 +90,7 @@ func TestInit(t *testing.T) {
// Set extra config if supplied
for _, item := range ExtraConfig {
fs.ConfigFileSet(item.Name, item.Key, item.Value)
config.FileSet(item.Name, item.Key, item.Value)
}
if *fstest.RemoteName != "" {
RemoteName = *fstest.RemoteName
@@ -99,7 +105,10 @@ func TestInit(t *testing.T) {
newFs(t)
skipIfNotOk(t)
fstest.TestMkdir(t, remote)
err = remote.Mkdir("")
require.NoError(t, err)
fstest.CheckListing(t, remote, []fstest.Item{})
}
func skipIfNotOk(t *testing.T) {
@@ -156,7 +165,8 @@ func TestFsRoot(t *testing.T) {
// TestFsRmdirEmpty tests deleting an empty directory
func TestFsRmdirEmpty(t *testing.T) {
skipIfNotOk(t)
fstest.TestRmdir(t, remote)
err := remote.Rmdir("")
require.NoError(t, err)
}
// TestFsRmdirNotFound tests deleting a non existent directory
@@ -175,23 +185,27 @@ func TestFsMkdir(t *testing.T) {
// (eg azure blob)
newFs(t)
fstest.TestMkdir(t, remote)
fstest.TestMkdir(t, remote)
err := remote.Mkdir("")
require.NoError(t, err)
fstest.CheckListing(t, remote, []fstest.Item{})
err = remote.Mkdir("")
require.NoError(t, err)
}
// TestFsMkdirRmdirSubdir tests making and removing a sub directory
func TestFsMkdirRmdirSubdir(t *testing.T) {
skipIfNotOk(t)
dir := "dir/subdir"
err := fs.Mkdir(remote, dir)
err := operations.Mkdir(remote, dir)
require.NoError(t, err)
fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir", "dir/subdir"}, fs.Config.ModifyWindow)
err = fs.Rmdir(remote, dir)
err = operations.Rmdir(remote, dir)
require.NoError(t, err)
fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir"}, fs.Config.ModifyWindow)
err = fs.Rmdir(remote, "dir")
err = operations.Rmdir(remote, "dir")
require.NoError(t, err)
fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.Config.ModifyWindow)
}
@@ -236,7 +250,7 @@ func objsToNames(objs []fs.Object) []string {
// TestFsListDirEmpty tests listing the directories from an empty directory
func TestFsListDirEmpty(t *testing.T) {
skipIfNotOk(t)
objs, dirs, err := fs.WalkGetAll(remote, "", true, 1)
objs, dirs, err := walk.GetAll(remote, "", true, 1)
require.NoError(t, err)
assert.Equal(t, []string{}, objsToNames(objs))
assert.Equal(t, []string{}, dirsToNames(dirs))
@@ -282,15 +296,15 @@ func testPut(t *testing.T, file *fstest.Item) string {
again:
contents := fstest.RandomString(100)
buf := bytes.NewBufferString(contents)
hash := fs.NewMultiHasher()
hash := hash.NewMultiHasher()
in := io.TeeReader(buf, hash)
file.Size = int64(buf.Len())
obji := fs.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
obj, err := remote.Put(in, obji)
if err != nil {
// Retry if err returned a retry error
if fs.IsRetryError(err) && tries < maxTries {
if fserrors.IsRetryError(err) && tries < maxTries {
t.Logf("Put error: %v - low level retry %d/%d", err, tries, maxTries)
time.Sleep(2 * time.Second)
@@ -334,7 +348,7 @@ func TestFsPutError(t *testing.T) {
er := &errorReader{errors.New("potato")}
in := io.MultiReader(buf, er)
obji := fs.NewStaticObjectInfo(file2.Path, file2.ModTime, 100, true, nil, nil)
obji := object.NewStaticObjectInfo(file2.Path, file2.ModTime, 100, true, nil, nil)
_, err := remote.Put(in, obji)
// assert.Nil(t, obj) - FIXME some remotes return the object even on nil
assert.NotNil(t, err)
@@ -364,9 +378,9 @@ func TestFsListDirFile2(t *testing.T) {
list := func(dir string, expectedDirNames, expectedObjNames []string) {
var objNames, dirNames []string
for i := 1; i <= *fstest.ListRetries; i++ {
objs, dirs, err := fs.WalkGetAll(remote, dir, true, 1)
objs, dirs, err := walk.GetAll(remote, dir, true, 1)
if errors.Cause(err) == fs.ErrorDirNotFound {
objs, dirs, err = fs.WalkGetAll(remote, winPath(dir), true, 1)
objs, dirs, err = walk.GetAll(remote, winPath(dir), true, 1)
}
require.NoError(t, err)
objNames = objsToNames(objs)
@@ -413,7 +427,7 @@ func TestFsListDirRoot(t *testing.T) {
skipIfNotOk(t)
rootRemote, err := fs.NewFs(RemoteName)
require.NoError(t, err)
_, dirs, err := fs.WalkGetAll(rootRemote, "", true, 1)
_, dirs, err := walk.GetAll(rootRemote, "", true, 1)
require.NoError(t, err)
assert.Contains(t, dirsToNames(dirs), subRemoteLeaf, "Remote leaf not found")
}
@@ -434,7 +448,7 @@ func TestFsListSubdir(t *testing.T) {
for i := 0; i < 2; i++ {
dir, _ := path.Split(fileName)
dir = dir[:len(dir)-1]
objs, dirs, err = fs.WalkGetAll(remote, dir, true, -1)
objs, dirs, err = walk.GetAll(remote, dir, true, -1)
if err != fs.ErrorDirNotFound {
break
}
@@ -455,7 +469,7 @@ func TestFsListRSubdir(t *testing.T) {
// TestFsListLevel2 tests List works for 2 levels
func TestFsListLevel2(t *testing.T) {
skipIfNotOk(t)
objs, dirs, err := fs.WalkGetAll(remote, "", true, 2)
objs, dirs, err := walk.GetAll(remote, "", true, 2)
if err == fs.ErrorLevelNotSupported {
return
}
@@ -676,7 +690,7 @@ func TestFsDirChangeNotify(t *testing.T) {
t.Skip("FS has no DirChangeNotify interface")
}
err := fs.Mkdir(remote, "dir")
err := operations.Mkdir(remote, "dir")
require.NoError(t, err)
changes := []string{}
@@ -685,7 +699,7 @@ func TestFsDirChangeNotify(t *testing.T) {
}, time.Second)
defer func() { close(quitChannel) }()
err = fs.Mkdir(remote, "dir/subdir")
err = operations.Mkdir(remote, "dir/subdir")
require.NoError(t, err)
time.Sleep(2 * time.Second)
@@ -817,12 +831,12 @@ func TestObjectUpdate(t *testing.T) {
skipIfNotOk(t)
contents := fstest.RandomString(200)
buf := bytes.NewBufferString(contents)
hash := fs.NewMultiHasher()
hash := hash.NewMultiHasher()
in := io.TeeReader(buf, hash)
file1.Size = int64(buf.Len())
obj := findObject(t, file1.Path)
obji := fs.NewStaticObjectInfo(file1.Path, file1.ModTime, int64(len(contents)), true, nil, obj.Fs())
obji := object.NewStaticObjectInfo(file1.Path, file1.ModTime, int64(len(contents)), true, nil, obj.Fs())
err := obj.Update(in, obji)
require.NoError(t, err)
file1.Hashes = hash.Sums()
@@ -896,15 +910,15 @@ again:
contentSize := 100
contents := fstest.RandomString(contentSize)
buf := bytes.NewBufferString(contents)
hash := fs.NewMultiHasher()
hash := hash.NewMultiHasher()
in := io.TeeReader(buf, hash)
file.Size = -1
obji := fs.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
obj, err := remote.Features().PutStream(in, obji)
if err != nil {
// Retry if err returned a retry error
if fs.IsRetryError(err) && tries < maxTries {
if fserrors.IsRetryError(err) && tries < maxTries {
t.Logf("Put error: %v - low level retry %d/%d", err, tries, maxTries)
time.Sleep(2 * time.Second)
@@ -924,8 +938,12 @@ again:
// TestObjectPurge tests Purge
func TestObjectPurge(t *testing.T) {
skipIfNotOk(t)
fstest.TestPurge(t, remote)
err := fs.Purge(remote)
err := operations.Purge(remote)
require.NoError(t, err)
fstest.CheckListing(t, remote, []fstest.Item{})
err = operations.Purge(remote)
assert.Error(t, err, "Expecting error after on second purge")
}

13
fstest/mockdir/dir.go Normal file
View File

@@ -0,0 +1,13 @@
// Package mockdir makes a mock fs.Directory object
package mockdir
import (
"time"
"github.com/ncw/rclone/fs"
)
// New makes a mock directory object with the name given.
//
// The returned directory has a zero modification time; it is only
// suitable for tests which care about the name.
func New(name string) fs.Directory {
	return fs.NewDir(name, time.Time{})
}

View File

@@ -0,0 +1,71 @@
// Package mockobject provides a mock object which can be created from a string
package mockobject
import (
"errors"
"io"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
)
// errNotImpl is returned by every stub method which has no real
// implementation.
var errNotImpl = errors.New("not implemented")

// Object is a mock fs.Object useful for testing
//
// The string value of the Object is used as both its remote path and
// its description; everything else is stubbed out.
type Object string

// String returns a description of the Object
func (o Object) String() string {
	return string(o)
}

// Fs returns read only access to the Fs that this object is part of
//
// Always nil for the mock.
func (o Object) Fs() fs.Info {
	return nil
}

// Remote returns the remote path
func (o Object) Remote() string {
	return string(o)
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
//
// Not implemented for the mock - always returns errNotImpl.
func (o Object) Hash(hash.Type) (string, error) {
	return "", errNotImpl
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
//
// The mock always returns the zero time.
func (o Object) ModTime() (t time.Time) {
	return t
}

// Size returns the size of the file - always 0 for the mock.
func (o Object) Size() int64 { return 0 }

// Storable says whether this object can be stored
func (o Object) Storable() bool {
	return true
}

// SetModTime sets the metadata on the object to set the modification date
//
// Not implemented for the mock - always returns errNotImpl.
func (o Object) SetModTime(time.Time) error {
	return errNotImpl
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
//
// Not implemented for the mock - always returns errNotImpl.
func (o Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
	return nil, errNotImpl
}

// Update in to the object with the modTime given of the given size
//
// Not implemented for the mock - always returns errNotImpl.
func (o Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errNotImpl
}

// Remove this object
//
// Not implemented for the mock - always returns errNotImpl.
func (o Object) Remove() error {
	return errNotImpl
}

View File

@@ -37,7 +37,9 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/object"
"github.com/ncw/rclone/fs/walk"
"github.com/stretchr/testify/require"
)
@@ -127,7 +129,7 @@ func NewRun(t *testing.T) *Run {
*r = *oneRun
r.cleanRemote = func() {
var toDelete dirsToRemove
err := fs.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
err := walk.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
if err == fs.ErrorDirNotFound {
return nil
@@ -231,13 +233,13 @@ func (r *Run) WriteObjectTo(f fs.Fs, remote, content string, modTime time.Time,
const maxTries = 10
for tries := 1; ; tries++ {
in := bytes.NewBufferString(content)
objinfo := fs.NewStaticObjectInfo(remote, modTime, int64(len(content)), true, nil, nil)
objinfo := object.NewStaticObjectInfo(remote, modTime, int64(len(content)), true, nil, nil)
_, err := put(in, objinfo)
if err == nil {
break
}
// Retry if err returned a retry error
if fs.IsRetryError(err) && tries < maxTries {
if fserrors.IsRetryError(err) && tries < maxTries {
r.Logf("Retry Put of %q to %v: %d/%d (%v)", remote, f, tries, maxTries, err)
time.Sleep(2 * time.Second)
continue
@@ -265,14 +267,15 @@ func (r *Run) WriteBoth(remote, content string, modTime time.Time) Item {
// CheckWithDuplicates does a test but allows duplicates
func (r *Run) CheckWithDuplicates(t *testing.T, items ...Item) {
objects, size, err := fs.Count(r.Fremote)
require.NoError(t, err)
assert.Equal(t, int64(len(items)), objects)
wantSize := int64(0)
for _, item := range items {
wantSize += item.Size
}
assert.Equal(t, wantSize, size)
panic("FIXME")
// objects, size, err := operations.Count(r.Fremote)
// require.NoError(t, err)
// assert.Equal(t, int64(len(items)), objects)
// wantSize := int64(0)
// for _, item := range items {
// wantSize += item.Size
// }
// assert.Equal(t, wantSize, size)
}
// Clean the temporary directory

464
fstest/test_all/test_all.go Normal file
View File

@@ -0,0 +1,464 @@
// Run tests for all the remotes. Run this with package names which
// need integration testing.
//
// See the `test` target in the Makefile.
package main
import (
"flag"
"go/build"
"log"
"os"
"os/exec"
"path"
"regexp"
"runtime"
"strings"
"time"
_ "github.com/ncw/rclone/backend/all" // import all fs
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/list"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest"
)
// remoteConfig describes how a single remote should be tested.
type remoteConfig struct {
	Name     string // name of the remote, eg "TestS3:"
	SubDir   bool   // also run the tests with the -subdir flag
	FastList bool   // also run the tests with the -fast-list flag
}

var (
	// remotes is the master list of remotes to run the integration
	// tests against, with the extra flag combinations each supports.
	remotes = []remoteConfig{
		{
			Name:     "TestAmazonCloudDrive:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestB2:",
			SubDir:   true,
			FastList: true,
		},
		{
			Name:     "TestCryptDrive:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestCryptSwift:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestDrive:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestDropbox:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestGoogleCloudStorage:",
			SubDir:   true,
			FastList: true,
		},
		{
			Name:     "TestHubic:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestOneDrive:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestS3:",
			SubDir:   true,
			FastList: true,
		},
		{
			Name:     "TestSftp:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestSwift:",
			SubDir:   true,
			FastList: true,
		},
		{
			Name:     "TestYandex:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestFTP:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestBox:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestQingStor:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestAzureBlob:",
			SubDir:   true,
			FastList: true,
		},
		{
			Name:     "TestPcloud:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestWebdav:",
			SubDir:   false,
			FastList: false,
		},
		{
			Name:     "TestCache:",
			SubDir:   false,
			FastList: false,
		},
	}

	// Flags
	maxTries = flag.Int("maxtries", 5, "Number of times to try each test")
	runTests = flag.String("remotes", "", "Comma separated list of remotes to test, eg 'TestSwift:,TestS3'")
	clean    = flag.Bool("clean", false, "Instead of testing, clean all left over test directories")
	runOnly  = flag.String("run", "", "Run only those tests matching the regexp supplied")
	timeout  = flag.Duration("timeout", 30*time.Minute, "Maximum time to run each test for before giving up")
)
// test holds info about a running test
type test struct {
	pkg         string   // import path of the package under test
	remote      string   // remote name passed to the test binary via -remote
	subdir      bool     // whether this run uses the -subdir flag
	cmdLine     []string // base command line to execute
	cmdString   string   // cmdLine rendered as a shell command for logging
	try         int      // current attempt number (1 based)
	err         error    // error from the most recent trial or clean
	output      []byte   // combined stdout/stderr of the most recent trial
	failedTests []string // test names parsed from the last failing output
	runFlag     string   // -test.run regexp for retrying just the failures
}
// newTest creates a new test
//
// pkg is the import path of the package under test, remote the remote
// name to pass to the test binary, and subdir/fastlist control
// whether the matching command line flags are added.
func newTest(pkg, remote string, subdir bool, fastlist bool) *test {
	binary := pkgBinary(pkg)
	t := &test{
		pkg:     pkg,
		remote:  remote,
		subdir:  subdir,
		cmdLine: []string{binary, "-test.timeout", (*timeout).String(), "-remote", remote},
		try:     1,
	}
	if *fstest.Verbose {
		// Propagate verbosity to both the child test binary and our
		// own logging.
		t.cmdLine = append(t.cmdLine, "-test.v")
		fs.Config.LogLevel = fs.LogLevelDebug
	}
	if *runOnly != "" {
		t.cmdLine = append(t.cmdLine, "-test.run", *runOnly)
	}
	if subdir {
		t.cmdLine = append(t.cmdLine, "-subdir")
	}
	if fastlist {
		t.cmdLine = append(t.cmdLine, "-fast-list")
	}
	// Cache the shell rendering of the command line for log messages.
	t.cmdString = toShell(t.cmdLine)
	return t
}
// dumpOutput prints the error output
//
// The captured output of the last run is written to the log between
// horizontal rules, preceded by the command which produced it.
func (t *test) dumpOutput() {
	log.Println("------------------------------------------------------------")
	log.Printf("---- %q ----", t.cmdString)
	log.Println(string(t.output))
	log.Println("------------------------------------------------------------")
}
// failRe matches the name of a failing test in "go test" output.
var failRe = regexp.MustCompile(`(?m)^--- FAIL: (Test\w*) \(`)

// findFailures looks for all the tests which failed
//
// It parses "--- FAIL: TestName" lines out of the captured output,
// stores the names in t.failedTests and builds t.runFlag, a
// -test.run regexp which re-runs just those tests.  It also cross
// checks the parsed failures against the process exit status and
// dumps the output when the two disagree.
func (t *test) findFailures() {
	// Keep the old list in case parsing disagrees with the exit
	// status below.
	oldFailedTests := t.failedTests
	t.failedTests = nil
	for _, matches := range failRe.FindAllSubmatch(t.output, -1) {
		t.failedTests = append(t.failedTests, string(matches[1]))
	}
	if len(t.failedTests) != 0 {
		t.runFlag = "^(" + strings.Join(t.failedTests, "|") + ")$"
	} else {
		t.runFlag = ""
	}
	if t.passed() && len(t.failedTests) != 0 {
		// Exit status was OK but the output contained FAIL lines.
		log.Printf("%q - Expecting no errors but got: %v", t.cmdString, t.failedTests)
		t.dumpOutput()
	} else if !t.passed() && len(t.failedTests) == 0 {
		// Exit status was a failure but no failing tests were parsed -
		// restore the previous list so a retry still has a run flag.
		log.Printf("%q - Expecting errors but got none: %v", t.cmdString, t.failedTests)
		t.dumpOutput()
		t.failedTests = oldFailedTests
	}
}
// nextCmdLine returns the command line for the next trial, adding a
// -test.run flag restricting it to the previously failed tests if
// there were any.
//
// The original code sliced t.cmdLine with [:], which shares the
// backing array, so the append below could overwrite spare capacity
// shared with t.cmdLine or with a slice returned by an earlier call.
// Copying into a fresh slice makes the result independent.
func (t *test) nextCmdLine() []string {
	cmdLine := append([]string(nil), t.cmdLine...)
	if t.runFlag != "" {
		cmdLine = append(cmdLine, "-test.run", t.runFlag)
	}
	return cmdLine
}
// shellOK matches arguments which are definitely safe to paste into a
// shell unquoted.
var shellOK = regexp.MustCompile("^[A-Za-z0-9./_:-]+$")

// toShell converts an argv style input into a shell command string.
//
// Safe arguments are passed through unchanged; anything else is
// single quoted.  Embedded single quotes are escaped as '\'' so the
// result is valid to paste into a POSIX shell (the original quoting
// produced a broken string for arguments containing a single quote).
func toShell(args []string) (result string) {
	for _, arg := range args {
		if result != "" {
			result += " "
		}
		if shellOK.MatchString(arg) {
			result += arg
		} else {
			result += "'" + strings.Replace(arg, "'", `'\''`, -1) + "'"
		}
	}
	return result
}
// trial runs a single test
//
// It executes the next command line once, capturing combined output
// and the exit error into t, then parses the output for failures and
// logs the outcome.
func (t *test) trial() {
	cmdLine := t.nextCmdLine()
	cmdString := toShell(cmdLine)
	log.Printf("%q - Starting (try %d/%d)", cmdString, t.try, *maxTries)
	cmd := exec.Command(cmdLine[0], cmdLine[1:]...)
	start := time.Now()
	t.output, t.err = cmd.CombinedOutput()
	duration := time.Since(start)
	t.findFailures()
	if t.passed() {
		log.Printf("%q - Finished OK in %v (try %d/%d)", cmdString, duration, t.try, *maxTries)
	} else {
		log.Printf("%q - Finished ERROR in %v (try %d/%d): %v: Failed %v", cmdString, duration, t.try, *maxTries, t.err, t.failedTests)
	}
}
// cleanFs runs a single clean fs for left over directories
//
// It lists the top level of the remote and purges every directory
// whose name matches the test-remote naming pattern, returning the
// first error encountered.
func (t *test) cleanFs() error {
	f, err := fs.NewFs(t.remote)
	if err != nil {
		return err
	}
	entries, err := list.DirSorted(f, true, "")
	if err != nil {
		return err
	}
	return entries.ForDirError(func(dir fs.Directory) error {
		remote := dir.Remote()
		if fstest.MatchTestRemote.MatchString(remote) {
			log.Printf("Purging %s%s", t.remote, remote)
			// Make an Fs rooted at the leftover directory and purge it.
			dir, err := fs.NewFs(t.remote + remote)
			if err != nil {
				return err
			}
			return operations.Purge(dir)
		}
		return nil
	})
}
// clean runs a single clean on a fs for left over directories
//
// The result is recorded in t.err and the outcome logged with the
// elapsed time.
func (t *test) clean() {
	log.Printf("%q - Starting clean (try %d/%d)", t.remote, t.try, *maxTries)
	start := time.Now()
	t.err = t.cleanFs()
	if t.err != nil {
		log.Printf("%q - Failed to purge %v", t.remote, t.err)
	}
	duration := time.Since(start)
	if t.passed() {
		log.Printf("%q - Finished OK in %v (try %d/%d)", t.cmdString, duration, t.try, *maxTries)
	} else {
		log.Printf("%q - Finished ERROR in %v (try %d/%d): %v", t.cmdString, duration, t.try, *maxTries, t.err)
	}
}
// passed returns true if the test passed, ie the most recent trial or
// clean recorded no error.
func (t *test) passed() bool {
	return t.err == nil
}
// run runs all the trials for this test
//
// Each test is tried up to *maxTries times, stopping early on the
// first pass.  In -clean mode a clean is run instead of a trial
// (skipped entirely for -subdir variants).  The test is sent on
// result when finished, dumping its output first if it never passed.
func (t *test) run(result chan<- *test) {
	for t.try = 1; t.try <= *maxTries; t.try++ {
		if *clean {
			if !t.subdir {
				t.clean()
			}
		} else {
			t.trial()
		}
		if t.passed() {
			break
		}
	}
	if !t.passed() {
		t.dumpOutput()
	}
	result <- t
}
// GOPATH returns the current GOPATH
func GOPATH() string {
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = build.Default.GOPATH
}
return gopath
}
// turn a package name into a binary name
func pkgBinaryName(pkg string) string {
binary := path.Base(pkg) + ".test"
if runtime.GOOS == "windows" {
binary += ".exe"
}
return binary
}
// pkgBinary turns a package name into the full path of its compiled
// test binary inside the package's source directory.
func pkgBinary(pkg string) string {
	return path.Join(pkgPath(pkg), pkgBinaryName(pkg))
}

// pkgPath returns the path to the package's source directory under
// GOPATH/src.
func pkgPath(pkg string) string {
	return path.Join(GOPATH(), "src", pkg)
}
// pkgChdir changes the process working directory into the package's
// source directory, exiting the process on failure.
func pkgChdir(pkg string) {
	err := os.Chdir(pkgPath(pkg))
	if err != nil {
		log.Fatalf("Failed to chdir to package %q: %v", pkg, err)
	}
}
// makeTestBinary makes the binary we will run
//
// It compiles the package's tests with "go test -c" in the package's
// source directory (note: this chdirs the whole process) and exits if
// compilation fails or the binary can't be found afterwards.
func makeTestBinary(pkg string) {
	binaryName := pkgBinaryName(pkg)
	log.Printf("%s: Making test binary %q", pkg, binaryName)
	pkgChdir(pkg)
	err := exec.Command("go", "test", "-c", "-o", binaryName).Run()
	if err != nil {
		log.Fatalf("Failed to make test binary: %v", err)
	}
	binary := pkgBinary(pkg)
	// Sanity check that the compiler actually produced the binary.
	if _, err := os.Stat(binary); err != nil {
		log.Fatalf("Couldn't find test binary %q", binary)
	}
}
// removeTestBinary removes the binary made in makeTestBinary
//
// Failures are logged but not fatal since this is only cleanup.
func removeTestBinary(pkg string) {
	binary := pkgBinary(pkg)
	err := os.Remove(binary) // Delete the binary when finished
	if err != nil {
		log.Printf("Error removing test binary %q: %v", binary, err)
	}
}
// main builds the test binaries for the packages given on the command
// line, runs every applicable (remote, subdir, fast-list) combination
// concurrently, and exits non-zero if any combination failed.
func main() {
	flag.Parse()
	packages := flag.Args()
	log.Printf("Testing packages: %s", strings.Join(packages, ", "))
	// If -remotes was given, filter the master remotes list down to
	// the named ones, inserting unknown names with default flags.
	if *runTests != "" {
		newRemotes := []remoteConfig{}
		for _, name := range strings.Split(*runTests, ",") {
			for i := range remotes {
				if remotes[i].Name == name {
					newRemotes = append(newRemotes, remotes[i])
					goto found
				}
			}
			log.Printf("Remote %q not found - inserting with default flags", name)
			newRemotes = append(newRemotes, remoteConfig{Name: name})
		found:
		}
		remotes = newRemotes
	}
	var names []string
	for _, remote := range remotes {
		names = append(names, remote.Name)
	}
	log.Printf("Testing remotes: %s", strings.Join(names, ", "))
	start := time.Now()
	if *clean {
		// Cleaning needs the rclone config; testing needs the
		// compiled test binaries instead.
		config.LoadConfig()
	} else {
		for _, pkg := range packages {
			makeTestBinary(pkg)
			defer removeTestBinary(pkg)
		}
	}
	// start the tests
	results := make(chan *test, 8)
	awaiting := 0
	bools := []bool{false, true}
	if *clean {
		// Don't run -subdir and -fast-list if -clean
		bools = bools[:1]
	}
	// Launch one goroutine per (package, remote, subdir, fastlist)
	// combination the remote supports.
	for _, pkg := range packages {
		for _, remote := range remotes {
			for _, subdir := range bools {
				for _, fastlist := range bools {
					if (!subdir || subdir && remote.SubDir) && (!fastlist || fastlist && remote.FastList) {
						go newTest(pkg, remote.Name, subdir, fastlist).run(results)
						awaiting++
					}
				}
			}
		}
	}
	// Wait for the tests to finish
	var failed []*test
	for ; awaiting > 0; awaiting-- {
		t := <-results
		if !t.passed() {
			failed = append(failed, t)
		}
	}
	duration := time.Since(start)
	// Summarise results
	log.Printf("SUMMARY")
	if len(failed) == 0 {
		log.Printf("PASS: All tests finished OK in %v", duration)
	} else {
		log.Printf("FAIL: %d tests failed in %v", len(failed), duration)
		for _, t := range failed {
			log.Printf(" * %s", toShell(t.nextCmdLine()))
			log.Printf(" * Failed tests: %v", t.failedTests)
		}
		os.Exit(1)
	}
}