1
0
mirror of https://github.com/rclone/rclone.git synced 2026-01-07 19:13:19 +00:00

Compare commits

..

10 Commits
v1.08 ... v1.10

Author SHA1 Message Date
Nick Craig-Wood
cf208ad21b Version v1.10 2015-02-12 18:00:20 +00:00
Nick Craig-Wood
0faed16899 s3: list an unlimited number of items - fixes #22 2015-02-10 17:58:29 +00:00
Nick Craig-Wood
8d1c0ad07c Fix config loop - fixes #25 2015-02-10 16:48:04 +00:00
Nick Craig-Wood
165e89c266 Version v1.09 2015-02-07 22:44:23 +00:00
Nick Craig-Wood
b4e19cfd62 windows: make tests work properly 2015-02-07 22:32:51 +00:00
Nick Craig-Wood
20ad96f3cd windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
This was done by stopping the user configuring single letter remotes
and making sure we don't treat single letter remotes as a remote name,
but as a drive letter.
2015-02-07 22:32:51 +00:00
Nick Craig-Wood
d64a37772f local: Fix directory separators on Windows - fixes #24 2015-02-07 22:32:51 +00:00
Nick Craig-Wood
5fb6f94579 drive: fix rate limit exceeded errors - fixes #20
This is done by pacing the requests to drive and backing them off
using an exponential increase.  Put and Modify operations can now be
retried also.
2015-02-07 22:32:51 +00:00
Nick Craig-Wood
20535348db Update docs to remove obsolete bug - fixes #21 2015-02-07 22:32:51 +00:00
Nick Craig-Wood
3d83a265c5 Update notes 2015-02-05 22:44:02 +00:00
15 changed files with 329 additions and 122 deletions

View File

@@ -254,14 +254,18 @@ COPYING file included in this package).
Bugs Bugs
---- ----
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* just retry the command if this happens
* Empty directories left behind with Local and Drive * Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work * eg purging a local directory with subdirectories doesn't work
Changelog Changelog
--------- ---------
* v1.10 - 2015-02-12
* s3: list an unlimited number of items
* Fix getting stuck in the configurator
* v1.09 - 2015-02-07
* windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
* local: Fix directory separators on Windows
* drive: fix rate limit exceeded errors
* v1.08 - 2015-02-04 * v1.08 - 2015-02-04
* drive: fix subdirectory listing to not list entire drive * drive: fix subdirectory listing to not list entire drive
* drive: Fix SetModTime * drive: Fix SetModTime

View File

@@ -132,9 +132,6 @@ COPYING file included in this package).
Bugs Bugs
---- ----
* Doesn't sync individual files yet, only directories.
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive * Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work * eg purging a local directory with subdirectories doesn't work

View File

@@ -2,34 +2,34 @@
title: "Rclone downloads" title: "Rclone downloads"
description: "Download rclone binaries for your OS." description: "Download rclone binaries for your OS."
type: page type: page
date: "2015-02-04" date: "2015-02-12"
--- ---
Rclone Download v1.08 Rclone Download v1.10
===================== =====================
* Windows * Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.08-windows-386.zip) * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.10-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.08-windows-amd64.zip) * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.10-windows-amd64.zip)
* OSX * OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.08-osx-386.zip) * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.10-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.08-osx-amd64.zip) * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.10-osx-amd64.zip)
* Linux * Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.08-linux-386.zip) * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.10-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.08-linux-amd64.zip) * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.10-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.08-linux-arm.zip) * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.10-linux-arm.zip)
* FreeBSD * FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.08-freebsd-386.zip) * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.10-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.08-freebsd-amd64.zip) * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.10-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.08-freebsd-arm.zip) * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.10-freebsd-arm.zip)
* NetBSD * NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.08-netbsd-386.zip) * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.10-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.08-netbsd-amd64.zip) * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.10-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.08-netbsd-arm.zip) * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.10-netbsd-arm.zip)
* OpenBSD * OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.08-openbsd-386.zip) * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.10-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.08-openbsd-amd64.zip) * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.10-openbsd-amd64.zip)
* Plan 9 * Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.08-plan9-386.zip) * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.10-plan9-386.zip)
Older downloads can be found [here](http://downloads.rclone.org/) Older downloads can be found [here](http://downloads.rclone.org/)

View File

@@ -1,14 +1,6 @@
// Drive interface // Drive interface
package drive package drive
// Gets this quite often
// Failed to set mtime: googleapi: Error 403: Rate Limit Exceeded
// FIXME list containers equivalent should list directories?
// FIXME list directory should list to channel for concurrency not
// append to array
// FIXME need to deal with some corner cases // FIXME need to deal with some corner cases
// * multiple files with the same name // * multiple files with the same name
// * files can be in multiple directories // * files can be in multiple directories
@@ -27,6 +19,7 @@ import (
"time" "time"
"google.golang.org/api/drive/v2" "google.golang.org/api/drive/v2"
"google.golang.org/api/googleapi"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/googleauth" "github.com/ncw/rclone/googleauth"
@@ -40,6 +33,9 @@ const (
driveFolderType = "application/vnd.google-apps.folder" driveFolderType = "application/vnd.google-apps.folder"
timeFormatIn = time.RFC3339 timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00" timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
) )
// Globals // Globals
@@ -83,6 +79,8 @@ type FsDrive struct {
findRootLock sync.Mutex // Protect findRoot from concurrent use findRootLock sync.Mutex // Protect findRoot from concurrent use
dirCache dirCache // Map of directory path to directory id dirCache dirCache // Map of directory path to directory id
findDirLock sync.Mutex // Protect findDir from concurrent use findDirLock sync.Mutex // Protect findDir from concurrent use
pacer chan struct{} // To pace the operations
sleepTime time.Duration // Time to sleep for each transaction
} }
// FsObjectDrive describes a drive object // FsObjectDrive describes a drive object
@@ -149,6 +147,74 @@ func (f *FsDrive) String() string {
return fmt.Sprintf("Google drive root '%s'", f.root) return fmt.Sprintf("Google drive root '%s'", f.root)
} }
// paceWait blocks until a pacing token is available, then schedules
// the next token to appear after the current sleep interval.
func (f *FsDrive) paceWait() {
	// The pacer channel holds at most one token.  Taking the token
	// gates this transaction; a goroutine puts it back after the
	// configured delay, which spaces transactions apart.  A Ticker
	// would be more accurate, but would need to be stopped when idle.
	<-f.pacer
	// Capture the sleep time now so the goroutine is unaffected by
	// later adjustments made by paceRefresh.
	delay := f.sleepTime
	go func() {
		time.Sleep(delay)
		f.pacer <- struct{}{}
	}()
}
// paceRefresh updates the pacing sleep time given the error returned
// from the last operation.  It returns a boolean as to whether the
// operation should be retried.
//
// On success the sleep time decays exponentially towards minSleep; on
// a rate limit error it doubles (capped at maxSleep) and a retry is
// requested.
//
// See https://developers.google.com/drive/web/handle-errors
// http://stackoverflow.com/questions/18529524/403-rate-limit-after-only-1-insert-per-second
func (f *FsDrive) paceRefresh(err error) bool {
	again := false
	oldSleepTime := f.sleepTime
	if err == nil {
		// Exponential decay: sleepTime *= 1 - 2^-decayConstant
		f.sleepTime = (f.sleepTime<<decayConstant - f.sleepTime) >> decayConstant
		if f.sleepTime < minSleep {
			f.sleepTime = minSleep
		}
		if f.sleepTime != oldSleepTime {
			fs.Debug(f, "Reducing sleep to %v", f.sleepTime)
		}
	} else {
		// Fixed typo in log message: "recived" -> "received"
		fs.Debug(f, "Error received: %v", err)
		// Only retry on the documented rate limit reasons
		if gerr, ok := err.(*googleapi.Error); ok {
			if len(gerr.Errors) > 0 {
				reason := gerr.Errors[0].Reason
				if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
					f.sleepTime *= 2
					if f.sleepTime > maxSleep {
						f.sleepTime = maxSleep
					}
					if f.sleepTime != oldSleepTime {
						fs.Debug(f, "Rate limited, increasing sleep to %v", f.sleepTime)
					}
					again = true
				}
			}
		}
	}
	return again
}
// pace runs fn under the transaction pacer to keep within Google's
// limits, retrying it for as long as paceRefresh reports that the
// error placed in *perr is a retryable 403 rate limit error.
func (f *FsDrive) pace(perr *error, fn func()) {
	retry := true
	for retry {
		f.paceWait()
		fn()
		retry = f.paceRefresh(*perr)
	}
}
// parseParse parses a drive 'url' // parseParse parses a drive 'url'
func parseDrivePath(path string) (root string, err error) { func parseDrivePath(path string) (root string, err error) {
root = strings.Trim(path, "/") root = strings.Trim(path, "/")
@@ -186,7 +252,10 @@ func (f *FsDrive) listAll(dirId string, title string, directoriesOnly bool, file
list := f.svc.Files.List().Q(query).MaxResults(1000) list := f.svc.Files.List().Q(query).MaxResults(1000)
OUTER: OUTER:
for { for {
files, err := list.Do() var files *drive.FileList
f.pace(&err, func() {
files, err = list.Do()
})
if err != nil { if err != nil {
return false, fmt.Errorf("Couldn't list directory: %s", err) return false, fmt.Errorf("Couldn't list directory: %s", err)
} }
@@ -217,10 +286,15 @@ func NewFs(name, path string) (fs.Fs, error) {
} }
f := &FsDrive{ f := &FsDrive{
root: root, root: root,
dirCache: newDirCache(), dirCache: newDirCache(),
pacer: make(chan struct{}, 1),
sleepTime: minSleep,
} }
// Put the first pacing token in
f.pacer <- struct{}{}
// Create a new authorized Drive client. // Create a new authorized Drive client.
f.client = t.Client() f.client = t.Client()
f.svc, err = drive.New(f.client) f.svc, err = drive.New(f.client)
@@ -229,7 +303,9 @@ func NewFs(name, path string) (fs.Fs, error) {
} }
// Read About so we know the root path // Read About so we know the root path
f.about, err = f.svc.About.Get().Do() f.pace(&err, func() {
f.about, err = f.svc.About.Get().Do()
})
if err != nil { if err != nil {
return nil, fmt.Errorf("Couldn't read info about Drive: %s", err) return nil, fmt.Errorf("Couldn't read info about Drive: %s", err)
} }
@@ -489,13 +565,16 @@ func (f *FsDrive) _findDir(path string, create bool) (pathId string, err error)
if create { if create {
// fmt.Println("Making", path) // fmt.Println("Making", path)
// Define the metadata for the directory we are going to create. // Define the metadata for the directory we are going to create.
info := &drive.File{ createInfo := &drive.File{
Title: leaf, Title: leaf,
Description: leaf, Description: leaf,
MimeType: driveFolderType, MimeType: driveFolderType,
Parents: []*drive.ParentReference{{Id: pathId}}, Parents: []*drive.ParentReference{{Id: pathId}},
} }
info, err := f.svc.Files.Insert(info).Do() var info *drive.File
f.pace(&err, func() {
info, err = f.svc.Files.Insert(createInfo).Do()
})
if err != nil { if err != nil {
return pathId, fmt.Errorf("Failed to make directory: %v", err) return pathId, fmt.Errorf("Failed to make directory: %v", err)
} }
@@ -629,7 +708,7 @@ func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64
modifiedDate := modTime.Format(timeFormatOut) modifiedDate := modTime.Format(timeFormatOut)
// Define the metadata for the file we are going to create. // Define the metadata for the file we are going to create.
info := &drive.File{ createInfo := &drive.File{
Title: leaf, Title: leaf,
Description: leaf, Description: leaf,
Parents: []*drive.ParentReference{{Id: directoryId}}, Parents: []*drive.ParentReference{{Id: directoryId}},
@@ -639,7 +718,13 @@ func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64
// Make the API request to upload metadata and file data. // Make the API request to upload metadata and file data.
in = &fs.SeekWrapper{In: in, Size: size} in = &fs.SeekWrapper{In: in, Size: size}
info, err = f.svc.Files.Insert(info).Media(in).Do() var info *drive.File
// Don't retry, return a retry error instead
f.paceWait()
info, err = f.svc.Files.Insert(createInfo).Media(in).Do()
if f.paceRefresh(err) {
return o, fs.RetryErrorf("Upload failed - retry: %s", err)
}
if err != nil { if err != nil {
return o, fmt.Errorf("Upload failed: %s", err) return o, fmt.Errorf("Upload failed: %s", err)
} }
@@ -660,7 +745,10 @@ func (f *FsDrive) Rmdir() error {
if err != nil { if err != nil {
return err return err
} }
children, err := f.svc.Children.List(f.rootId).MaxResults(10).Do() var children *drive.ChildList
f.pace(&err, func() {
children, err = f.svc.Children.List(f.rootId).MaxResults(10).Do()
})
if err != nil { if err != nil {
return err return err
} }
@@ -669,7 +757,9 @@ func (f *FsDrive) Rmdir() error {
} }
// Delete the directory if it isn't the root // Delete the directory if it isn't the root
if f.root != "" { if f.root != "" {
err = f.svc.Files.Delete(f.rootId).Do() f.pace(&err, func() {
err = f.svc.Files.Delete(f.rootId).Do()
})
if err != nil { if err != nil {
return err return err
} }
@@ -696,7 +786,9 @@ func (f *FsDrive) Purge() error {
if err != nil { if err != nil {
return err return err
} }
err = f.svc.Files.Delete(f.rootId).Do() f.pace(&err, func() {
err = f.svc.Files.Delete(f.rootId).Do()
})
f.resetRoot() f.resetRoot()
if err != nil { if err != nil {
return err return err
@@ -801,11 +893,14 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) {
return return
} }
// New metadata // New metadata
info := &drive.File{ updateInfo := &drive.File{
ModifiedDate: modTime.Format(timeFormatOut), ModifiedDate: modTime.Format(timeFormatOut),
} }
// Set modified date // Set modified date
info, err = o.drive.svc.Files.Update(o.id, info).SetModifiedDate(true).Do() var info *drive.File
o.drive.pace(&err, func() {
info, err = o.drive.svc.Files.Update(o.id, updateInfo).SetModifiedDate(true).Do()
})
if err != nil { if err != nil {
fs.Stats.Error() fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err) fs.Log(o, "Failed to update remote mtime: %s", err)
@@ -826,7 +921,10 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
return nil, err return nil, err
} }
req.Header.Set("User-Agent", fs.UserAgent) req.Header.Set("User-Agent", fs.UserAgent)
res, err := o.drive.client.Do(req) var res *http.Response
o.drive.pace(&err, func() {
res, err = o.drive.client.Do(req)
})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -843,14 +941,21 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) error { func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) error {
info := &drive.File{ updateInfo := &drive.File{
Id: o.id, Id: o.id,
ModifiedDate: modTime.Format(timeFormatOut), ModifiedDate: modTime.Format(timeFormatOut),
} }
// Make the API request to upload metadata and file data. // Make the API request to upload metadata and file data.
in = &fs.SeekWrapper{In: in, Size: size} in = &fs.SeekWrapper{In: in, Size: size}
info, err := o.drive.svc.Files.Update(info.Id, info).SetModifiedDate(true).Media(in).Do() var err error
var info *drive.File
// Don't retry, return a retry error instead
o.drive.paceWait()
info, err = o.drive.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in).Do()
if o.drive.paceRefresh(err) {
return fs.RetryErrorf("Update failed - retry: %s", err)
}
if err != nil { if err != nil {
return fmt.Errorf("Update failed: %s", err) return fmt.Errorf("Update failed: %s", err)
} }
@@ -860,7 +965,11 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro
// Remove an object // Remove an object
func (o *FsObjectDrive) Remove() error { func (o *FsObjectDrive) Remove() error {
return o.drive.svc.Files.Delete(o.id).Do() var err error
o.drive.pace(&err, func() {
err = o.drive.svc.Files.Delete(o.id).Do()
})
return err
} }
// Check the interfaces are satisfied // Check the interfaces are satisfied

View File

@@ -330,9 +330,20 @@ func EditConfig() {
name := ChooseRemote() name := ChooseRemote()
EditRemote(name) EditRemote(name)
case 'n': case 'n':
fmt.Printf("name> ") nameLoop:
name := ReadLine() for {
NewRemote(name) fmt.Printf("name> ")
name := ReadLine()
switch {
case name == "":
fmt.Printf("Can't use empty name\n")
case isDriveLetter(name):
fmt.Printf("Can't use %q as it can be confused with a drive letter\n", name)
default:
NewRemote(name)
break nameLoop
}
}
case 'd': case 'd':
name := ChooseRemote() name := ChooseRemote()
DeleteRemote(name) DeleteRemote(name)

12
fs/driveletter.go Normal file
View File

@@ -0,0 +1,12 @@
// +build !windows
package fs
// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter.
//
// Drive letters are a Windows-only concept, so on all other
// platforms this is unconditionally false.
func isDriveLetter(name string) bool {
	return false
}

13
fs/driveletter_windows.go Normal file
View File

@@ -0,0 +1,13 @@
// +build windows
package fs
// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter, ie a single ASCII letter a-z or A-Z.
func isDriveLetter(name string) bool {
	if len(name) != 1 {
		return false
	}
	switch c := name[0]; {
	case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z':
		return true
	default:
		return false
	}
}

View File

@@ -142,6 +142,35 @@ type Purger interface {
Purge() error Purge() error
} }
// Retry is an optional interface for an error, indicating whether
// the operation that produced it should be retried.
//
// This should be returned from Update or Put methods as required.
type Retry interface {
	error
	Retry() bool
}

// retryError is an error string which always reports that it should
// be retried.
type retryError string

// Error implements the error interface.
func (r retryError) Error() string {
	return string(r)
}

// Retry always returns true - see the Retry interface.
func (r retryError) Retry() bool {
	return true
}

// Check retryError satisfies the Retry interface at compile time.
var _ Retry = retryError("")

// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...interface{}) error {
	return retryError(fmt.Sprintf(format, a...))
}
// A channel of Objects // A channel of Objects
type ObjectsChan chan Object type ObjectsChan chan Object
@@ -167,9 +196,6 @@ type Dir struct {
// A channel of Dir objects // A channel of Dir objects
type DirChan chan *Dir type DirChan chan *Dir
// Pattern to match a url
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)
// Finds a FsInfo object for the name passed in // Finds a FsInfo object for the name passed in
// //
// Services are looked up in the config file // Services are looked up in the config file
@@ -182,16 +208,22 @@ func Find(name string) (*FsInfo, error) {
return nil, fmt.Errorf("Didn't find filing system for %q", name) return nil, fmt.Errorf("Didn't find filing system for %q", name)
} }
// Pattern to match an rclone url
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)
// NewFs makes a new Fs object from the path // NewFs makes a new Fs object from the path
// //
// The path is of the form remote:path // The path is of the form remote:path
// //
// Remotes are looked up in the config file. If the remote isn't // Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned. // found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(path string) (Fs, error) { func NewFs(path string) (Fs, error) {
parts := matcher.FindStringSubmatch(path) parts := matcher.FindStringSubmatch(path)
fsName, configName, fsPath := "local", "local", path fsName, configName, fsPath := "local", "local", path
if parts != nil { if parts != nil && !isDriveLetter(parts[1]) {
configName, fsPath = parts[1], parts[2] configName, fsPath = parts[1], parts[2]
var err error var err error
fsName, err = ConfigFile.GetValue(configName, "type") fsName, err = ConfigFile.GetValue(configName, "type")

View File

@@ -103,8 +103,7 @@ func removeFailedCopy(dst Object) {
Debug(dst, "Removing failed copy") Debug(dst, "Removing failed copy")
removeErr := dst.Remove() removeErr := dst.Remove()
if removeErr != nil { if removeErr != nil {
Stats.Error() Debug(dst, "Failed to remove failed copy: %s", removeErr)
Log(dst, "Failed to remove failed copy: %s", removeErr)
} }
} }
} }
@@ -115,6 +114,10 @@ func removeFailedCopy(dst Object) {
// call Copy() with dst nil on a pre-existing file then some filing // call Copy() with dst nil on a pre-existing file then some filing
// systems (eg Drive) may duplicate the file. // systems (eg Drive) may duplicate the file.
func Copy(f Fs, dst, src Object) { func Copy(f Fs, dst, src Object) {
const maxTries = 10
tries := 0
doUpdate := dst != nil
tryAgain:
in0, err := src.Open() in0, err := src.Open()
if err != nil { if err != nil {
Stats.Error() Stats.Error()
@@ -124,7 +127,7 @@ func Copy(f Fs, dst, src Object) {
in := NewAccount(in0) // account the transfer in := NewAccount(in0) // account the transfer
var actionTaken string var actionTaken string
if dst != nil { if doUpdate {
actionTaken = "Copied (updated existing)" actionTaken = "Copied (updated existing)"
err = dst.Update(in, src.ModTime(), src.Size()) err = dst.Update(in, src.ModTime(), src.Size())
} else { } else {
@@ -132,6 +135,13 @@ func Copy(f Fs, dst, src Object) {
dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size()) dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
} }
inErr := in.Close() inErr := in.Close()
// Retry if err returned a retry error
if r, ok := err.(Retry); ok && r.Retry() && tries < maxTries {
tries++
Log(src, "Received error: %v - retrying %d/%d", err, tries, maxTries)
removeFailedCopy(dst)
goto tryAgain
}
if err == nil { if err == nil {
err = inErr err = inErr
} }

View File

@@ -10,6 +10,7 @@ import (
"log" "log"
"os" "os"
"path" "path"
"path/filepath"
"regexp" "regexp"
"strings" "strings"
"testing" "testing"
@@ -74,6 +75,7 @@ func TestInit(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to create temp dir: %v", err) t.Fatalf("Failed to create temp dir: %v", err)
} }
localName = filepath.ToSlash(localName)
t.Logf("Testing with local %q", localName) t.Logf("Testing with local %q", localName)
flocal, err = fs.NewFs(localName) flocal, err = fs.NewFs(localName)
if err != nil { if err != nil {
@@ -83,6 +85,7 @@ func TestInit(t *testing.T) {
} }
func TestCalculateModifyWindow(t *testing.T) { func TestCalculateModifyWindow(t *testing.T) {
fs.CalculateModifyWindow(fremote, flocal) fs.CalculateModifyWindow(fremote, flocal)
t.Logf("ModifyWindow is %q", fs.Config.ModifyWindow)
} }
func TestMkdir(t *testing.T) { func TestMkdir(t *testing.T) {
@@ -104,8 +107,8 @@ func TestCopyWithDryRun(t *testing.T) {
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"}, {Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
} }
fstest.CheckListing(t, flocal, items) fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListing(t, fremote, []fstest.Item{}) fstest.CheckListingWithPrecision(t, fremote, []fstest.Item{}, fs.Config.ModifyWindow)
} }
// Now without dry run // Now without dry run
@@ -119,8 +122,8 @@ func TestCopy(t *testing.T) {
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"}, {Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
} }
fstest.CheckListing(t, flocal, items) fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListing(t, fremote, items) fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
} }
func TestLsd(t *testing.T) { func TestLsd(t *testing.T) {
@@ -145,8 +148,8 @@ func TestCopyAfterDelete(t *testing.T) {
items := []fstest.Item{ items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"}, {Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
} }
fstest.CheckListing(t, flocal, []fstest.Item{}) fstest.CheckListingWithPrecision(t, flocal, []fstest.Item{}, fs.Config.ModifyWindow)
fstest.CheckListing(t, fremote, items) fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
} }
func TestCopyRedownload(t *testing.T) { func TestCopyRedownload(t *testing.T) {
@@ -158,8 +161,8 @@ func TestCopyRedownload(t *testing.T) {
items := []fstest.Item{ items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"}, {Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
} }
fstest.CheckListingWithPrecision(t, flocal, items, fremote.Precision()) fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListing(t, fremote, items) fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
// Clean the directory // Clean the directory
cleanTempDir(t) cleanTempDir(t)
@@ -179,8 +182,8 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
items := []fstest.Item{ items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"}, {Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
} }
fstest.CheckListing(t, flocal, items) fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListing(t, fremote, items) fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
} }
func TestSyncAfterAddingAFile(t *testing.T) { func TestSyncAfterAddingAFile(t *testing.T) {
@@ -193,8 +196,8 @@ func TestSyncAfterAddingAFile(t *testing.T) {
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"}, {Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"}, {Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
} }
fstest.CheckListing(t, flocal, items) fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListing(t, fremote, items) fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
} }
func TestSyncAfterChangingFilesSizeOnly(t *testing.T) { func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
@@ -207,8 +210,8 @@ func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"}, {Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"}, {Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
} }
fstest.CheckListing(t, flocal, items) fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListing(t, fremote, items) fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
} }
// Sync after changing a file's contents, modtime but not length // Sync after changing a file's contents, modtime but not length
@@ -222,8 +225,8 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) {
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"}, {Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"}, {Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
} }
fstest.CheckListing(t, flocal, items) fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListing(t, fremote, items) fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
} }
// Sync after removing a file and adding a file --dry-run // Sync after removing a file and adding a file --dry-run
@@ -248,8 +251,8 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"}, {Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"}, {Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
} }
fstest.CheckListing(t, flocal, items) fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListing(t, fremote, before) fstest.CheckListingWithPrecision(t, fremote, before, fs.Config.ModifyWindow)
} }
// Sync after removing a file and adding a file // Sync after removing a file and adding a file
@@ -262,8 +265,8 @@ func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"}, {Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"}, {Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
} }
fstest.CheckListing(t, flocal, items) fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListing(t, fremote, items) fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
} }
func TestLs(t *testing.T) { func TestLs(t *testing.T) {

View File

@@ -1,3 +1,3 @@
package fs package fs
const Version = "v1.08" const Version = "v1.10"

View File

@@ -9,6 +9,7 @@ import (
"log" "log"
"math/rand" "math/rand"
"os" "os"
"path/filepath"
"strings" "strings"
"testing" "testing"
"time" "time"
@@ -80,6 +81,7 @@ func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
i, ok := is.byName[obj.Remote()] i, ok := is.byName[obj.Remote()]
if !ok { if !ok {
t.Errorf("Unexpected file %q", obj.Remote()) t.Errorf("Unexpected file %q", obj.Remote())
return
} }
delete(is.byName, obj.Remote()) delete(is.byName, obj.Remote())
i.Check(t, obj, precision) i.Check(t, obj, precision)
@@ -140,6 +142,7 @@ func LocalRemote() (path string, err error) {
// Now remove the directory // Now remove the directory
err = os.Remove(path) err = os.Remove(path)
} }
path = filepath.ToSlash(path)
return return
} }

View File

@@ -1,6 +1,11 @@
// Local filesystem interface // Local filesystem interface
package local package local
// Note that all rclone paths should be / separated. Anything coming
// from the filepath module will have \ separators on windows so
// should be converted using filepath.ToSlash. Windows is quite happy
// with / separators so there is no need to convert them back.
import ( import (
"crypto/md5" "crypto/md5"
"encoding/hex" "encoding/hex"
@@ -45,7 +50,7 @@ type FsObjectLocal struct {
// NewFs contstructs an FsLocal from the path // NewFs contstructs an FsLocal from the path
func NewFs(name, root string) (fs.Fs, error) { func NewFs(name, root string) (fs.Fs, error) {
root = path.Clean(root) root = filepath.ToSlash(path.Clean(root))
f := &FsLocal{root: root} f := &FsLocal{root: root}
// Check to see if this points to a file // Check to see if this points to a file
fi, err := os.Lstat(f.root) fi, err := os.Lstat(f.root)
@@ -69,7 +74,8 @@ func (f *FsLocal) String() string {
// //
// May return nil if an error occurred // May return nil if an error occurred
func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object { func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
path := filepath.Join(f.root, remote) remote = filepath.ToSlash(remote)
path := path.Join(f.root, remote)
o := &FsObjectLocal{local: f, remote: remote, path: path} o := &FsObjectLocal{local: f, remote: remote, path: path}
if info != nil { if info != nil {
o.info = info o.info = info
@@ -173,7 +179,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
// Puts the FsObject to the local filesystem // Puts the FsObject to the local filesystem
func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) { func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
dstPath := filepath.Join(f.root, remote) dstPath := path.Join(f.root, remote)
// Temporary FsObject under construction - info filled in by Update() // Temporary FsObject under construction - info filled in by Update()
o := &FsObjectLocal{local: f, remote: remote, path: dstPath} o := &FsObjectLocal{local: f, remote: remote, path: dstPath}
err := o.Update(in, modTime, size) err := o.Update(in, modTime, size)

View File

@@ -38,6 +38,12 @@ Todo
* Need to make directory objects otherwise can't upload an empty directory * Need to make directory objects otherwise can't upload an empty directory
* Or could upload empty directories only? * Or could upload empty directories only?
More rsync features
* include
* exclude
* max size
* bandwidth limit
Ideas Ideas
* could do encryption - put IV into metadata? * could do encryption - put IV into metadata?
* optimise remote copy container to another container using remote * optimise remote copy container to another container using remote
@@ -59,15 +65,3 @@ Bugs
* Non verbose - not sure number transferred got counted up? CHECK * Non verbose - not sure number transferred got counted up? CHECK
* When doing copy it recurses the whole of the destination FS which isn't necessary * When doing copy it recurses the whole of the destination FS which isn't necessary
Making a release
* go get -u -f -v ./...
* make test
* make tag
* edit README.md
* git commit fs/version.go README.md docs/content/downloads.md
* make retag
* . ~/bin/go-cross
* make cross
* make upload
* make upload_website
* git push --tags origin master

View File

@@ -7,6 +7,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log"
"mime" "mime"
"net/http" "net/http"
"path" "path"
@@ -101,7 +102,8 @@ func init() {
// Constants // Constants
const ( const (
metaMtime = "X-Amz-Meta-Mtime" // the meta key to store mtime in metaMtime = "X-Amz-Meta-Mtime" // the meta key to store mtime in
listChunkSize = 1024 // number of items to read at once
) )
// FsS3 represents a remote s3 server // FsS3 represents a remote s3 server
@@ -267,36 +269,47 @@ func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
if directories { if directories {
delimiter = "/" delimiter = "/"
} }
// FIXME need to implement ALL loop marker := ""
objects, err := f.b.List(f.root, delimiter, "", 10000) for {
if err != nil { objects, err := f.b.List(f.root, delimiter, marker, listChunkSize)
fs.Stats.Error() if err != nil {
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err) fs.Stats.Error()
} else { fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
rootLength := len(f.root)
if directories {
for _, remote := range objects.CommonPrefixes {
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote := remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
fn(remote, &s3.Key{Key: remote})
}
} else { } else {
for i := range objects.Contents { rootLength := len(f.root)
object := &objects.Contents[i] if directories {
if !strings.HasPrefix(object.Key, f.root) { for _, remote := range objects.CommonPrefixes {
fs.Log(f, "Odd name received %q", object.Key) if !strings.HasPrefix(remote, f.root) {
continue fs.Log(f, "Odd name received %q", remote)
continue
}
remote := remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
fn(remote, &s3.Key{Key: remote})
}
} else {
for i := range objects.Contents {
object := &objects.Contents[i]
if !strings.HasPrefix(object.Key, f.root) {
fs.Log(f, "Odd name received %q", object.Key)
continue
}
remote := object.Key[rootLength:]
fn(remote, object)
} }
remote := object.Key[rootLength:]
fn(remote, object)
} }
} }
if !objects.IsTruncated {
break
}
// Use NextMarker if set, otherwise use last Key
marker = objects.NextMarker
if marker == "" {
marker = objects.Contents[len(objects.Contents)-1].Key
}
log.Printf("retry with marker = %q", marker)
} }
} }