
Compare commits: v1.13 ... v1.02 (1 commit)

Nick Craig-Wood · 094cc27621 · Version 1.02 · 2014-07-19 13:12:20 +01:00
42 changed files with 869 additions and 2917 deletions

.travis.yml

@@ -3,8 +3,7 @@ language: go
go:
- 1.1.2
- 1.2.2
- 1.3.3
- 1.4
- 1.3
- tip
script:

Makefile

@@ -2,13 +2,9 @@ TAG := $(shell git describe --tags)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
rclone:
rclone: *.go */*.go
@go version
go install -v ./...
test: rclone
go test ./...
cd fs && ./test_all.sh
go build
doc: rclone.1 README.html README.txt
@@ -23,7 +19,7 @@ README.txt: README.md
install: rclone
install -d ${DESTDIR}/usr/bin
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
install -t ${DESTDIR}/usr/bin rclone
clean:
go clean ./...
@@ -35,10 +31,10 @@ website:
cd docs && hugo
upload_website: website
rclone -v sync docs/public memstore:www-rclone-org
./rclone -v sync docs/public memstore:www-rclone-org
upload:
rclone -v copy build/ memstore:downloads-rclone-org
./rclone -v copy build/ memstore:downloads-rclone-org
cross: doc
./cross-compile $(TAG)
@@ -61,6 +57,3 @@ tag:
retag:
git tag -f $(LAST_TAG)
gen_tests:
cd fstest/fstests && go run gen_tests.go
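
The `NEW_TAG` rule above bumps the previous tag by 0.01, so for example v1.02 becomes v1.03. A sketch of the same computation in Go (illustrative only; the Makefile itself uses the perl one-liner):

```
// Equivalent of the NEW_TAG perl one-liner: strip the leading "v",
// add 0.01, and reformat to two decimal places.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func nextTag(lastTag string) string {
	v, _ := strconv.ParseFloat(strings.TrimPrefix(lastTag, "v"), 64)
	return fmt.Sprintf("v%.2f", v+0.01)
}

func main() {
	fmt.Println(nextTag("v1.02")) // v1.03
	fmt.Println(nextTag("v1.13")) // v1.14
}
```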

README.md

@@ -75,34 +75,28 @@ Subcommands
rclone copy source:path dest:path
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.
rclone sync source:path dest:path
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the `--dry-run` flag.
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the `--dry-run` flag.
rclone ls [remote:path]
List all the objects in the the path with size and path.
rclone lsd [remote:path]
List all directories/containers/buckets in the the path.
List all the objects in the the path with sizes.
rclone lsl [remote:path]
List all the objects in the the path with modification time,
size and path.
List all the objects in the the path with sizes and timestamps.
rclone md5sum [remote:path]
rclone lsd [remote:path]
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.
List all directories/objects/buckets in the the path.
rclone mkdir remote:path
@@ -123,30 +117,22 @@ Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
rclone config
rclone md5sum remote:path
Enter an interactive configuration session.
rclone help
This help.
Produces an md5sum file for all the objects in the path. This is in
the same format as the standard md5sum tool produces.
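
For reference, that format is one line per object: the hex digest, two spaces, then the path (example values, not from the repo):

```
d41d8cd98f00b204e9800998ecf8427e  empty.txt
b1946ac92492d2347c6235b4d2611184  subdir/hello.txt
```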
General options:
```
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
--checkers=8: Number of checkers to run in parallel.
--config="~/.rclone.conf": Config file.
--contimeout=1m0s: Connect timeout
-n, --dry-run=false: Do a trial run with no permanent changes
--log-file="": Log everything to this file
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats (0 to disable)
--timeout=5m0s: IO idle timeout
--stats=1m0s: Interval to print stats
--transfers=4: Number of file transfers to run in parallel.
-v, --verbose=false: Print lots more stuff
-V, --version=false: Print the version number
```
Developer options:
@@ -175,7 +161,12 @@ So to copy a local directory to a swift container called backup:
rclone sync /home/source swift:backup
For more help see the [online docs on Openstack Swift](http://rclone.org/swift).
The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point since the epoch.
This is a defacto standard (used in the official python-swiftclient
amongst others) for storing the modification time (as read using
os.Stat) for an object.
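
The same convention, a floating point seconds-since-the-epoch string in a metadata header, is used for S3's `X-Amz-Meta-Mtime` below. A minimal sketch of the encoding (illustrative, not rclone's code):

```
// Sketch of the floating point "seconds since the epoch" encoding used
// for X-Object-Meta-Mtime on swift and X-Amz-Meta-Mtime on S3. Note
// that decoding via float64 limits the precision of the round trip.
package main

import (
	"fmt"
	"strconv"
	"time"
)

// encodeMtime renders a time as "<unix seconds>.<9 digit nanoseconds>".
func encodeMtime(t time.Time) string {
	return fmt.Sprintf("%d.%09d", t.Unix(), t.Nanosecond())
}

// decodeMtime parses the metadata value back into a time.Time.
func decodeMtime(s string) (time.Time, error) {
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return time.Time{}, err
	}
	secs := int64(f)
	return time.Unix(secs, int64((f-float64(secs))*1e9)), nil
}

func main() {
	t := time.Date(2014, 7, 19, 13, 12, 20, 123456789, time.UTC)
	meta := encodeMtime(t)
	fmt.Println("X-Object-Meta-Mtime:", meta) // 1405775540.123456789
	back, _ := decodeMtime(meta)
	fmt.Println(back.UTC())
}
```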
Amazon S3
---------
@@ -187,7 +178,8 @@ So to copy a local directory to a s3 container called backup
rclone sync /home/source s3:backup
For more help see the [online docs on Amazon S3](http://rclone.org/s3).
The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch.
Google drive
------------
@@ -202,7 +194,7 @@ To copy a local directory to a drive directory called backup
rclone copy /home/source remote:backup
For more help see the [online docs on Google Drive](http://rclone.org/drive).
Google drive stores modification times accurate to 1 ms natively.
Dropbox
-------
@@ -217,7 +209,10 @@ To copy a local directory to a drive directory called backup
rclone copy /home/source dropbox:backup
For more help see the [online docs on Dropbox](http://rclone.org/dropbox).
Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone". Dropbox datastores are limited
to 100,000 rows so this is the maximum number of files rclone can
manage on Dropbox.
Google Cloud Storage
--------------------
@@ -233,7 +228,9 @@ To copy a local directory to a google cloud storage directory called backup
rclone copy /home/source remote:backup
For more help see the [online docs on Google Cloud Storage](http://rclone.org/googlecloudstorage/).
Google google cloud storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in
RFC3339 format accurate to 1ns.
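
Both here and for Dropbox above, timestamps are serialized with a fixed nine-digit fractional second (the `RFC3339Out`/`timeFormatOut` constant in the Go code later in this diff), because Go's `time.RFC3339Nano` trims trailing zeros. A short sketch:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	// Fixed 9-digit nanoseconds, as stored in the object metadata.
	const out = "2006-01-02T15:04:05.000000000Z07:00"
	t := time.Date(2014, 7, 19, 13, 12, 20, 500000000, time.UTC)
	fmt.Println(t.Format(out))              // 2014-07-19T13:12:20.500000000Z
	fmt.Println(t.Format(time.RFC3339Nano)) // 2014-07-19T13:12:20.5Z (zeros trimmed)
	// Reading back is forgiving: Go's parser accepts fractional
	// seconds of any length after the seconds field.
	back, _ := time.Parse(time.RFC3339, "2014-07-19T13:12:20.500000000Z")
	fmt.Println(back.Equal(t)) // true
}
```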
Single file copies
------------------
@@ -257,62 +254,13 @@ COPYING file included in this package).
Bugs
----
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work
Changelog
---------
* v1.13 - 2015-05-10
* Revise documentation (especially sync)
* Implement --timeout and --conntimeout
* s3: ignore etags from multipart uploads which aren't md5sums
* v1.12 - 2015-03-15
* drive: Use chunked upload for files above a certain size
* drive: add --drive-chunk-size and --drive-upload-cutoff parameters
* drive: switch to insert from update when a failed copy deletes the upload
* core: Log duplicate files if they are detected
* v1.11 - 2015-03-04
* swift: add region parameter
* drive: fix crash on failed to update remote mtime
* In remote paths, change native directory separators to /
* Add synchronization to ls/lsl/lsd output to stop corruptions
* Ensure all stats/log messages to go stderr
* Add --log-file flag to log everything (including panics) to file
* Make it possible to disable stats printing with --stats=0
* Implement --bwlimit to limit data transfer bandwidth
* v1.10 - 2015-02-12
* s3: list an unlimited number of items
* Fix getting stuck in the configurator
* v1.09 - 2015-02-07
* windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
* local: Fix directory separators on Windows
* drive: fix rate limit exceeded errors
* v1.08 - 2015-02-04
* drive: fix subdirectory listing to not list entire drive
* drive: Fix SetModTime
* dropbox: adapt code to recent library changes
* v1.07 - 2014-12-23
* google cloud storage: fix memory leak
* v1.06 - 2014-12-12
* Fix "Couldn't find home directory" on OSX
* swift: Add tenant parameter
* Use new location of Google API packages
* v1.05 - 2014-08-09
* Improved tests and consequently lots of minor fixes
* core: Fix race detected by go race detector
* core: Fixes after running errcheck
* drive: reset root directory on Rmdir and Purge
* fs: Document that Purger returns error on empty directory, test and fix
* google cloud storage: fix ListDir on subdirectory
* google cloud storage: re-read metadata in SetModTime
* s3: make reading metadata more reliable to work around eventual consistency problems
* s3: strip trailing / from ListDir()
* swift: return directories without / in ListDir
* v1.04 - 2014-07-21
* google cloud storage: Fix crash on Update
* v1.03 - 2014-07-20
* swift, s3, dropbox: fix updated files being marked as corrupted
* Make compile with go 1.1 again
* v1.02 - 2014-07-19
* Implement Dropbox remote
* Implement Google Cloud Storage remote

RELEASE.md

@@ -1,19 +0,0 @@
Required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* [gox](https://github.com/mitchellh/gox) for cross compiling
* Run `gox -build-toolchain`
* This assumes you have your own source checkout
* pandoc for making the html and man pages
Making a release
* go get -u -f -v ./...
* make test
* make tag
* edit README.md
* git commit fs/version.go README.md docs/content/downloads.md
* make retag
* # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross
* make cross
* make upload
* make upload_website
* git push --tags origin master

docs/content/docs.md

@@ -58,34 +58,28 @@ Subcommands
rclone copy source:path dest:path
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.
rclone sync source:path dest:path
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the `--dry-run` flag.
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the -dry-run flag.
rclone ls [remote:path]
List all the objects in the the path with size and path.
rclone lsd [remote:path]
List all directories/containers/buckets in the the path.
List all the objects in the the path with sizes.
rclone lsl [remote:path]
List all the objects in the the path with modification time,
size and path.
List all the objects in the the path with sizes and timestamps.
rclone md5sum [remote:path]
rclone lsd [remote:path]
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.
List all directories/objects/buckets in the the path.
rclone mkdir remote:path
@@ -106,28 +100,21 @@ Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
rclone config
rclone md5sum remote:path
Enter an interactive configuration session.
rclone help
This help.
Produces an md5sum file for all the objects in the path. This is in
the same format as the standard md5sum tool produces.
General options:
```
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
--checkers=8: Number of checkers to run in parallel.
--transfers=4: Number of file transfers to run in parallel.
--config="~/.rclone.conf": Config file.
--contimeout=1m0s: Connect timeout
-n, --dry-run=false: Do a trial run with no permanent changes
--log-file="": Log everything to this file
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats (0 to disable)
--timeout=5m0s: IO idle timeout
--transfers=4: Number of file transfers to run in parallel.
--stats=1m0s: Interval to print stats
-v, --verbose=false: Print lots more stuff
-V, --version=false: Print the version number
```
Developer options:
@@ -145,6 +132,9 @@ COPYING file included in this package).
Bugs
----
* Doesn't sync individual files yet, only directories.
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work

docs/content/downloads.md

@@ -2,34 +2,34 @@
title: "Rclone downloads"
description: "Download rclone binaries for your OS."
type: page
date: "2015-05-10"
date: "2014-07-19"
---
Rclone Download v1.13
Rclone Download v1.02
=====================
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-windows-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-osx-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.13-linux-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.02-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.13-freebsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.02-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.13-netbsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.02-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-openbsd-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-plan9-386.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-plan9-386.zip)
Older downloads can be found [here](http://downloads.rclone.org/)

docs/content/drive.md

@@ -1,7 +1,7 @@
---
title: "Google drive"
description: "Rclone docs for Google drive"
date: "2015-05-10"
date: "2014-04-26"
---
<i class="fa fa-google"></i> Google Drive
@@ -73,16 +73,3 @@ Modified time
-------------
Google drive stores modification times accurate to 1 ms.
Revisions
---------
Google drive stores revisions of files. When you upload a change to
an existing file to google drive using rclone it will create a new
revision of that file.
Revisions follow the standard google policy which at time of writing
was
* They are deleted after 30 days or 100 revisions (whatever comes first).
* They do not count towards a user storage quota.

docs/content/swift.md

@@ -52,15 +52,12 @@ Choose a number from below, or type in your own value
* Memset Memstore UK v2
5) https://auth.storage.memset.com/v2.0
auth> 1
Tenant name - optional
tenant>
Remote config
--------------------
[remote]
user = user_name
key = password_or_api_key
auth = https://auth.api.rackspacecloud.com/v1.0
tenant =
--------------------
y) Yes this is OK
e) Edit this remote

drive/drive.go

@@ -1,6 +1,14 @@
// Drive interface
package drive
// Gets this quite often
// Failed to set mtime: googleapi: Error 403: Rate Limit Exceeded
// FIXME list containers equivalent should list directories?
// FIXME list directory should list to channel for concurrency not
// append to array
// FIXME need to deal with some corner cases
// * multiple files with the same name
// * files can be in multiple directories
@@ -10,14 +18,16 @@ package drive
import (
"fmt"
"io"
"log"
"mime"
"net/http"
"os"
"path"
"strings"
"sync"
"time"
"google.golang.org/api/drive/v2"
"google.golang.org/api/googleapi"
"code.google.com/p/google-api-go-client/drive/v2"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/googleauth"
"github.com/ogier/pflag"
@@ -28,21 +38,14 @@ const (
rcloneClientId = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "X4Z3ca8xfWDb1Voo-F9a7ZxJ"
driveFolderType = "application/vnd.google-apps.folder"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
RFC3339In = time.RFC3339
RFC3339Out = "2006-01-02T15:04:05.000000000Z07:00"
)
// Globals
var (
// Flags
driveFullList = pflag.BoolP("drive-full-list", "", true, "Use a full listing for directory list. More data but usually quicker.")
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
chunkSize = fs.SizeSuffix(256 * 1024)
driveUploadCutoff = chunkSize
// Description of how to auth for this app
driveAuth = &googleauth.Auth{
Scope: "https://www.googleapis.com/auth/drive",
@@ -67,8 +70,6 @@ func init() {
Help: "Google Application Client Secret - leave blank to use rclone's.",
}},
})
pflag.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
pflag.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must a power of 2 >= 256k.")
}
// FsDrive represents a remote drive server
@@ -80,10 +81,8 @@ type FsDrive struct {
rootId string // Id of the root directory
foundRoot bool // Whether we have found the root or not
findRootLock sync.Mutex // Protect findRoot from concurrent use
dirCache *dirCache // Map of directory path to directory id
dirCache dirCache // Map of directory path to directory id
findDirLock sync.Mutex // Protect findDir from concurrent use
pacer chan struct{} // To pace the operations
sleepTime time.Duration // Time to sleep for each transaction
}
// FsObjectDrive describes a drive object
@@ -105,8 +104,8 @@ type dirCache struct {
}
// Make a new locked map
func newDirCache() *dirCache {
d := &dirCache{}
func newDirCache() dirCache {
d := dirCache{}
d.Flush()
return d
}
@@ -150,97 +149,6 @@ func (f *FsDrive) String() string {
return fmt.Sprintf("Google drive root '%s'", f.root)
}
// Start a call to the drive API
//
// This must be called as a pair with endCall
//
// This waits for the pacer token
func (f *FsDrive) beginCall() {
// pacer starts with a token in and whenever we take one out
// XXX ms later we put another in. We could do this with a
// Ticker more accurately, but then we'd have to work out how
// not to run it when it wasn't needed
<-f.pacer
// Restart the timer
go func(t time.Duration) {
// fs.Debug(f, "New sleep for %v at %v", t, time.Now())
time.Sleep(t)
f.pacer <- struct{}{}
}(f.sleepTime)
}
// End a call to the drive API
//
// Refresh the pace given an error that was returned. It returns a
// boolean as to whether the operation should be retried.
//
// See https://developers.google.com/drive/web/handle-errors
// http://stackoverflow.com/questions/18529524/403-rate-limit-after-only-1-insert-per-second
func (f *FsDrive) endCall(err error) bool {
again := false
oldSleepTime := f.sleepTime
if err == nil {
f.sleepTime = (f.sleepTime<<decayConstant - f.sleepTime) >> decayConstant
if f.sleepTime < minSleep {
f.sleepTime = minSleep
}
if f.sleepTime != oldSleepTime {
fs.Debug(f, "Reducing sleep to %v", f.sleepTime)
}
} else {
fs.Debug(f, "Error recived: %T %#v", err, err)
// Check for net error Timeout()
if x, ok := err.(interface {
Timeout() bool
}); ok && x.Timeout() {
again = true
}
// Check for net error Temporary()
if x, ok := err.(interface {
Temporary() bool
}); ok && x.Temporary() {
again = true
}
switch gerr := err.(type) {
case *googleapi.Error:
if gerr.Code >= 500 && gerr.Code < 600 {
// All 5xx errors should be retried
again = true
} else if len(gerr.Errors) > 0 {
reason := gerr.Errors[0].Reason
if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
again = true
}
}
}
}
if again {
f.sleepTime *= 2
if f.sleepTime > maxSleep {
f.sleepTime = maxSleep
}
if f.sleepTime != oldSleepTime {
fs.Debug(f, "Rate limited, increasing sleep to %v", f.sleepTime)
}
}
return again
}
// Pace the remote operations to not exceed Google's limits and retry
// on 403 rate limit exceeded
//
// This calls fn, expecting it to place its error in perr
func (f *FsDrive) call(perr *error, fn func()) {
for {
f.beginCall()
fn()
if !f.endCall(*perr) {
break
}
}
}
// parseParse parses a drive 'url'
func parseDrivePath(path string) (root string, err error) {
root = strings.Trim(path, "/")
@@ -278,10 +186,7 @@ func (f *FsDrive) listAll(dirId string, title string, directoriesOnly bool, file
list := f.svc.Files.List().Q(query).MaxResults(1000)
OUTER:
for {
var files *drive.FileList
f.call(&err, func() {
files, err = list.Do()
})
files, err := list.Do()
if err != nil {
return false, fmt.Errorf("Couldn't list directory: %s", err)
}
@@ -299,27 +204,8 @@ OUTER:
return
}
// Returns true of x is a power of 2 or zero
func isPowerOfTwo(x int64) bool {
switch {
case x == 0:
return true
case x < 0:
return false
default:
return (x & (x - 1)) == 0
}
}
// NewFs contstructs an FsDrive from the path, container:path
func NewFs(name, path string) (fs.Fs, error) {
if !isPowerOfTwo(int64(chunkSize)) {
return nil, fmt.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
}
if chunkSize < 256*1024 {
return nil, fmt.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
}
t, err := driveAuth.NewTransport(name)
if err != nil {
return nil, err
@@ -331,15 +217,10 @@ func NewFs(name, path string) (fs.Fs, error) {
}
f := &FsDrive{
root: root,
dirCache: newDirCache(),
pacer: make(chan struct{}, 1),
sleepTime: minSleep,
root: root,
dirCache: newDirCache(),
}
// Put the first pacing token in
f.pacer <- struct{}{}
// Create a new authorized Drive client.
f.client = t.Client()
f.svc, err = drive.New(f.client)
@@ -348,15 +229,15 @@ func NewFs(name, path string) (fs.Fs, error) {
}
// Read About so we know the root path
f.call(&err, func() {
f.about, err = f.svc.About.Get().Do()
})
f.about, err = f.svc.About.Get().Do()
if err != nil {
return nil, fmt.Errorf("Couldn't read info about Drive: %s", err)
}
// Find the Id of the true root and clear everything
f.resetRoot()
// Find the Id of the root directory and the Id of its parent
f.rootId = f.about.RootFolderId
// Put the root directory in
f.dirCache.Put("", f.rootId)
// Find the current root
err = f.findRoot(false)
if err != nil {
@@ -370,7 +251,7 @@ func NewFs(name, path string) (fs.Fs, error) {
// No root so return old f
return f, nil
}
obj, err := newF.newFsObjectWithInfoErr(remote, nil)
obj, err := newF.newFsObjectWithInfo(remote, nil)
if err != nil {
// File doesn't exist so return old f
return f, nil
@@ -383,7 +264,7 @@ func NewFs(name, path string) (fs.Fs, error) {
}
// Return an FsObject from a path
func (f *FsDrive) newFsObjectWithInfoErr(remote string, info *drive.File) (fs.Object, error) {
func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
fs := &FsObjectDrive{
drive: f,
remote: remote,
@@ -403,8 +284,8 @@ func (f *FsDrive) newFsObjectWithInfoErr(remote string, info *drive.File) (fs.Ob
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) fs.Object {
fs, _ := f.newFsObjectWithInfoErr(remote, info)
func (f *FsDrive) NewFsObjectWithInfo(remote string, info *drive.File) fs.Object {
fs, _ := f.newFsObjectWithInfo(remote, info)
// Errors have already been logged
return fs
}
@@ -413,7 +294,7 @@ func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) fs.Object
//
// May return nil if an error occurred
func (f *FsDrive) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
return f.NewFsObjectWithInfo(remote, nil)
}
// Path should be directory path either "" or "path/"
@@ -437,7 +318,7 @@ func (f *FsDrive) listDirRecursive(dirId string, path string, out fs.ObjectsChan
} else {
// If item has no MD5 sum it isn't stored on drive, so ignore it
if item.Md5Checksum != "" {
if fs := f.newFsObjectWithInfo(path+item.Title, item); fs != nil {
if fs := f.NewFsObjectWithInfo(path+item.Title, item); fs != nil {
out <- fs
}
}
@@ -487,7 +368,7 @@ func (f *FsDrive) listDirFull(dirId string, path string, out fs.ObjectsChan) err
// fmt.Printf("file %s %s %s\n", path, item.Title, item.Id)
// If item has no MD5 sum it isn't stored on drive, so ignore it
if item.Md5Checksum != "" {
if fs := f.newFsObjectWithInfo(path, item); fs != nil {
if fs := f.NewFsObjectWithInfo(path, item); fs != nil {
out <- fs
}
}
@@ -610,18 +491,15 @@ func (f *FsDrive) _findDir(path string, create bool) (pathId string, err error)
if create {
// fmt.Println("Making", path)
// Define the metadata for the directory we are going to create.
createInfo := &drive.File{
info := &drive.File{
Title: leaf,
Description: leaf,
MimeType: driveFolderType,
Parents: []*drive.ParentReference{{Id: pathId}},
}
var info *drive.File
f.call(&err, func() {
info, err = f.svc.Files.Insert(createInfo).Do()
})
info, err := f.svc.Files.Insert(info).Do()
if err != nil {
return pathId, fmt.Errorf("Failed to make directory: %v", err)
return pathId, fmt.Errorf("Failed to make directory")
}
pathId = info.Id
} else {
@@ -659,20 +537,6 @@ func (f *FsDrive) findRoot(create bool) error {
return nil
}
// Resets the root directory to the absolute root and clears the dirCache
func (f *FsDrive) resetRoot() {
f.findRootLock.Lock()
defer f.findRootLock.Unlock()
f.foundRoot = false
f.dirCache.Flush()
// Put the true root in
f.rootId = f.about.RootFolderId
// Put the root directory in
f.dirCache.Put("", f.rootId)
}
// Walk the path returning a channel of FsObjects
func (f *FsDrive) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
@@ -681,16 +545,16 @@ func (f *FsDrive) List() fs.ObjectsChan {
err := f.findRoot(false)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't find root: %s", err)
log.Printf("Couldn't find root: %s", err)
} else {
if f.root == "" && *driveFullList {
if *driveFullList {
err = f.listDirFull(f.rootId, "", out)
} else {
err = f.listDirRecursive(f.rootId, "", out)
}
if err != nil {
fs.Stats.Error()
fs.Log(f, "List failed: %s", err)
log.Printf("List failed: %s", err)
}
}
}()
@@ -705,7 +569,7 @@ func (f *FsDrive) ListDir() fs.DirChan {
err := f.findRoot(false)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't find root: %s", err)
log.Printf("Couldn't find root: %s", err)
} else {
_, err := f.listAll(f.rootId, "", true, false, func(item *drive.File) bool {
dir := &fs.Dir{
@@ -713,19 +577,43 @@ func (f *FsDrive) ListDir() fs.DirChan {
Bytes: -1,
Count: -1,
}
dir.When, _ = time.Parse(timeFormatIn, item.ModifiedDate)
dir.When, _ = time.Parse(RFC3339In, item.ModifiedDate)
out <- dir
return false
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "ListDir failed: %s", err)
log.Printf("ListDir failed: %s", err)
}
}
}()
return out
}
// seekWrapper wraps an io.Reader with a basic Seek for
// code.google.com/p/google-api-go-client/googleapi
// to detect the length (see getReaderSize function)
type seekWrapper struct {
in io.Reader
size int64
}
// Read bytes from the object - see io.Reader
func (file *seekWrapper) Read(p []byte) (n int, err error) {
return file.in.Read(p)
}
// Seek - minimal implementation for Google Drive's length detection
func (file *seekWrapper) Seek(offset int64, whence int) (int64, error) {
switch whence {
case os.SEEK_CUR:
return 0, nil
case os.SEEK_END:
return file.size, nil
}
return 0, nil
}
// Put the object
//
// This assumes that the object doesn't not already exists - if you
@@ -745,33 +633,27 @@ func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64
return o, fmt.Errorf("Couldn't find or make directory: %s", err)
}
// Guess the mime type
mimeType := mime.TypeByExtension(path.Ext(o.remote))
if mimeType == "" {
mimeType = "application/octet-stream"
}
modifiedDate := modTime.Format(RFC3339Out)
// Define the metadata for the file we are going to create.
createInfo := &drive.File{
info := &drive.File{
Title: leaf,
Description: leaf,
Parents: []*drive.ParentReference{{Id: directoryId}},
MimeType: fs.MimeType(o),
ModifiedDate: modTime.Format(timeFormatOut),
MimeType: mimeType,
ModifiedDate: modifiedDate,
}
var info *drive.File
if size == 0 || size < int64(driveUploadCutoff) {
// Make the API request to upload metadata and file data.
// Don't retry, return a retry error instead
f.beginCall()
info, err = f.svc.Files.Insert(createInfo).Media(in).Do()
if f.endCall(err) {
return o, fs.RetryErrorf("Upload failed - retry: %s", err)
}
if err != nil {
return o, fmt.Errorf("Upload failed: %s", err)
}
} else {
// Upload the file in chunks
info, err = f.Upload(in, size, createInfo.MimeType, createInfo, remote)
if err != nil {
return o, err
}
// Make the API request to upload metadata and file data.
in = &seekWrapper{in: in, size: size}
info, err = f.svc.Files.Insert(info).Media(in).Do()
if err != nil {
return o, fmt.Errorf("Upload failed: %s", err)
}
o.setMetaData(info)
return o, nil
@@ -790,10 +672,7 @@ func (f *FsDrive) Rmdir() error {
if err != nil {
return err
}
var children *drive.ChildList
f.call(&err, func() {
children, err = f.svc.Children.List(f.rootId).MaxResults(10).Do()
})
children, err := f.svc.Children.List(f.rootId).MaxResults(10).Do()
if err != nil {
return err
}
@@ -802,14 +681,11 @@ func (f *FsDrive) Rmdir() error {
}
// Delete the directory if it isn't the root
if f.root != "" {
f.call(&err, func() {
err = f.svc.Files.Delete(f.rootId).Do()
})
err = f.svc.Files.Delete(f.rootId).Do()
if err != nil {
return err
}
}
f.resetRoot()
return nil
}
@@ -831,10 +707,8 @@ func (f *FsDrive) Purge() error {
if err != nil {
return err
}
f.call(&err, func() {
err = f.svc.Files.Delete(f.rootId).Do()
})
f.resetRoot()
err = f.svc.Files.Delete(f.rootId).Do()
f.dirCache.Flush()
if err != nil {
return err
}
@@ -921,7 +795,7 @@ func (o *FsObjectDrive) ModTime() time.Time {
fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
modTime, err := time.Parse(RFC3339In, o.modifiedDate)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %s", err)
return time.Now()
@@ -938,21 +812,15 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) {
return
}
// New metadata
updateInfo := &drive.File{
ModifiedDate: modTime.Format(timeFormatOut),
info := &drive.File{
ModifiedDate: modTime.Format(RFC3339Out),
}
// Set modified date
var info *drive.File
o.drive.call(&err, func() {
info, err = o.drive.svc.Files.Update(o.id, updateInfo).SetModifiedDate(true).Do()
})
_, err = o.drive.svc.Files.Update(o.id, info).SetModifiedDate(true).Do()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
return
}
// Update info from read data
o.setMetaData(info)
}
// Is this object storable
@@ -967,15 +835,12 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
return nil, err
}
req.Header.Set("User-Agent", fs.UserAgent)
var res *http.Response
o.drive.call(&err, func() {
res, err = o.drive.client.Do(req)
})
res, err := o.drive.client.Do(req)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
_ = res.Body.Close() // ignore error
res.Body.Close()
return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
@@ -987,30 +852,16 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
//
// The new object may have been created if an error is returned
func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) error {
updateInfo := &drive.File{
info := &drive.File{
Id: o.id,
ModifiedDate: modTime.Format(timeFormatOut),
ModifiedDate: modTime.Format(RFC3339Out),
}
// Make the API request to upload metadata and file data.
var err error
var info *drive.File
if size == 0 || size < int64(driveUploadCutoff) {
// Don't retry, return a retry error instead
o.drive.beginCall()
info, err = o.drive.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in).Do()
if o.drive.endCall(err) {
return fs.RetryErrorf("Update failed - retry: %s", err)
}
if err != nil {
return fmt.Errorf("Update failed: %s", err)
}
} else {
// Upload the file in chunks
info, err = o.drive.Upload(in, size, fs.MimeType(o), updateInfo, o.remote)
if err != nil {
return err
}
in = &seekWrapper{in: in, size: size}
info, err := o.drive.svc.Files.Update(info.Id, info).SetModifiedDate(true).Media(in).Do()
if err != nil {
return fmt.Errorf("Update failed: %s", err)
}
o.setMetaData(info)
return nil
@@ -1018,11 +869,7 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro
// Remove an object
func (o *FsObjectDrive) Remove() error {
var err error
o.drive.call(&err, func() {
err = o.drive.svc.Files.Delete(o.id).Do()
})
return err
return o.drive.svc.Files.Delete(o.id).Do()
}
// Check the interfaces are satisfied
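
The pacing technique implemented by `beginCall`/`endCall`/`call` above boils down to: a single-token channel spaces API calls, the delay decays geometrically after a success and doubles after a rate limit error, clamped to [minSleep, maxSleep]. A minimal standalone sketch (names and structure simplified; not rclone's API):

```
package main

import (
	"errors"
	"fmt"
	"time"
)

const (
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2 // bigger for slower decay
)

var errRateLimit = errors.New("rateLimitExceeded")

type pacer struct {
	tokens chan struct{}
	sleep  time.Duration
}

func newPacer() *pacer {
	p := &pacer{tokens: make(chan struct{}, 1), sleep: minSleep}
	p.tokens <- struct{}{} // prime with the single pacing token
	return p
}

// call runs fn, retrying while it returns a rate limit error.
func (p *pacer) call(fn func() error) error {
	for {
		<-p.tokens // take the token...
		go func(d time.Duration) {
			time.Sleep(d) // ...and put it back d later
			p.tokens <- struct{}{}
		}(p.sleep)
		err := fn()
		if err == nil {
			// Decay: sleep *= (2^decayConstant - 1) / 2^decayConstant
			p.sleep = (p.sleep<<decayConstant - p.sleep) >> decayConstant
			if p.sleep < minSleep {
				p.sleep = minSleep
			}
			return nil
		}
		if err != errRateLimit {
			return err // only rate limit errors are retried here
		}
		if p.sleep *= 2; p.sleep > maxSleep {
			p.sleep = maxSleep
		}
	}
}

func main() {
	p, n := newPacer(), 0
	_ = p.call(func() error {
		if n++; n < 3 {
			return errRateLimit // simulate two 403 Rate Limit Exceeded
		}
		fmt.Println("succeeded after", n, "calls")
		return nil
	})
}
```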

drive/drive_test.go

@@ -1,53 +0,0 @@
// Test Drive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
package drive_test
import (
"testing"
"github.com/ncw/rclone/drive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func init() {
fstests.NilObject = fs.Object((*drive.FsObjectDrive)(nil))
fstests.RemoteName = "TestDrive:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

drive/upload.go

@@ -1,246 +0,0 @@
// Upload for drive
//
// Docs
// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
// Files insert: https://developers.google.com/drive/v2/reference/files/insert
// Files update: https://developers.google.com/drive/v2/reference/files/update
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
package drive
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/ncw/rclone/fs"
"google.golang.org/api/drive/v2"
"google.golang.org/api/googleapi"
)
const (
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
statusResumeIncomplete = 308
// Number of times to try each chunk
maxTries = 10
)
// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
f *FsDrive
remote string
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
URI string
// Media is the object being uploaded.
Media io.Reader
// MediaType defines the media type, e.g. "image/jpeg".
MediaType string
// ContentLength is the full size of the object being uploaded.
ContentLength int64
// Return value
ret *drive.File
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) {
fileId := info.Id
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(info)
if err != nil {
return nil, err
}
params := make(url.Values)
params.Set("alt", "json")
params.Set("uploadType", "resumable")
urls := "https://www.googleapis.com/upload/drive/v2/files"
method := "POST"
if fileId != "" {
params.Set("setModifiedDate", "true")
urls += "/{fileId}"
method = "PUT"
}
urls += "?" + params.Encode()
req, _ := http.NewRequest(method, urls, body)
googleapi.Expand(req.URL, map[string]string{
"fileId": fileId,
})
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
req.Header.Set("X-Upload-Content-Type", contentType)
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
req.Header.Set("User-Agent", fs.UserAgent)
var res *http.Response
f.call(&err, func() {
res, err = f.client.Do(req)
if err == nil {
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
})
if err != nil {
return nil, err
}
loc := res.Header.Get("Location")
rx := &resumableUpload{
f: f,
remote: remote,
URI: loc,
Media: in,
MediaType: contentType,
ContentLength: size,
}
return rx.Upload()
}
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(start int64, body []byte) *http.Request {
reqSize := int64(len(body))
req, _ := http.NewRequest("POST", rx.URI, bytes.NewBuffer(body))
req.ContentLength = reqSize
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
} else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
}
req.Header.Set("Content-Type", rx.MediaType)
req.Header.Set("User-Agent", fs.UserAgent)
return req
}
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, fmt.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, fmt.Errorf("unable to parse range %q", Range)
}
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, body []byte) (int, error) {
req := rx.makeRequest(start, body)
res, err := rx.f.client.Do(req)
if err != nil {
return 599, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == statusResumeIncomplete {
return res.StatusCode, nil
}
err = googleapi.CheckResponse(res)
if err != nil {
return res.StatusCode, err
}
// When the entire file upload is complete, the server
// responds with an HTTP 201 Created along with any metadata
// associated with this resource. If this request had been
// updating an existing entity rather than creating a new one,
// the HTTP response code for a completed upload would have
// been 200 OK.
//
// So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionaly on
// StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err
}
return res.StatusCode, nil
}
// Upload uploads the chunks from the input
// It retries each chunk maxTries times (with a pause of uploadPause between attempts).
func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
buf := make([]byte, chunkSize)
var StatusCode int
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
} else {
buf = buf[:reqSize]
}
// Read the chunk
_, err := io.ReadFull(rx.Media, buf)
if err != nil {
return nil, err
}
// Transfer the chunk
for try := 1; try <= maxTries; try++ {
fs.Debug(rx.remote, "Sending chunk %d length %d, %d/%d", start, reqSize, try, maxTries)
rx.f.beginCall()
StatusCode, err = rx.transferChunk(start, buf)
rx.f.endCall(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
goto success
}
fs.Debug(rx.remote, "Retrying chunk %d/%d, code=%d, err=%v", try, maxTries, StatusCode, err)
}
fs.Debug(rx.remote, "Failed to send chunk")
return nil, fs.RetryErrorf("Chunk upload failed - retry: code=%d, err=%v", StatusCode, err)
success:
start += reqSize
}
// Resume or retry uploads that fail due to connection interruptions or
// any 5xx errors, including:
//
// 500 Internal Server Error
// 502 Bad Gateway
// 503 Service Unavailable
// 504 Gateway Timeout
//
// Use an exponential backoff strategy if any 5xx server error is
// returned when resuming or retrying upload requests. These errors can
// occur if a server is getting overloaded. Exponential backoff can help
// alleviate these kinds of problems during periods of high volume of
// requests or heavy network traffic. Other kinds of requests should not
// be handled by exponential backoff but you can still retry a number of
// them. When retrying these requests, limit the number of times you
// retry them. For example your code could limit to ten retries or less
// before reporting an error.
//
// Handle 404 Not Found errors when doing resumable uploads by starting
// the entire upload over from the beginning.
if rx.ret == nil {
return nil, fs.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
}
return rx.ret, nil
}
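
The resumable upload above sends each chunk with a `Content-Range` header and probes progress with a zero-length request, resuming on a 308 status. A worked example of the header arithmetic in `makeRequest` (illustrative, not rclone's code):

```
// Each chunk covers bytes [start, start+len-1] of ContentLength total
// bytes; a zero-length probe sends "bytes */total" instead.
package main

import "fmt"

func contentRange(start, reqSize, total int64) string {
	if reqSize == 0 {
		return fmt.Sprintf("bytes */%v", total) // status probe
	}
	return fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, total)
}

func main() {
	const total = 600 * 1024
	const chunk = 256 * 1024
	for start := int64(0); start < total; start += chunk {
		size := int64(chunk)
		if rem := total - start; rem < size {
			size = rem
		}
		fmt.Println("Content-Range:", contentRange(start, size, total))
	}
	// Content-Range: bytes 0-262143/614400
	// Content-Range: bytes 262144-524287/614400
	// Content-Range: bytes 524288-614399/614400
}
```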

dropbox/dropbox.go

@@ -17,20 +17,6 @@ This is a JSON decode error - from Update / UploadByChunk
- Caused by 500 error from dropbox
- See https://github.com/stacktic/dropbox/issues/1
- Possibly confusing dropbox with excess concurrency?
FIXME implement timeouts - need to get "github.com/stacktic/dropbox"
and hence "golang.org/x/oauth2" which uses DefaultTransport unless it
is set in the context passed into .Client()
func (db *Dropbox) client() *http.Client {
return db.config.Client(oauth2.NoContext, db.token)
}
// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
var HTTPClient ContextKey
So pass in a context with HTTPClient set...
*/
import (
@@ -59,8 +45,8 @@ const (
md5sumField = "md5sum"
mtimeField = "mtime"
maxCommitRetries = 5
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
RFC3339In = time.RFC3339
RFC3339Out = "2006-01-02T15:04:05.000000000Z07:00"
)
// Register with Fs
@@ -68,7 +54,7 @@ func init() {
fs.Register(&fs.FsInfo{
Name: "dropbox",
NewFs: NewFs,
Config: configHelper,
Config: Config,
Options: []fs.Option{{
Name: "app_key",
Help: "Dropbox App Key - leave blank to use rclone's.",
@@ -80,7 +66,7 @@ func init() {
}
// Configuration helper - called after the user has put in the defaults
func configHelper(name string) {
func Config(name string) {
// See if already have a token
token := fs.ConfigFile.MustValue(name, "token")
if token != "" {
@@ -228,9 +214,7 @@ func (f *FsDropbox) openDataStore() {
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) (fs.Object, error) {
o := &FsObjectDropbox{
dropbox: f,
remote: remote,
@@ -241,17 +225,26 @@ func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.O
err := o.readEntryAndSetMetadata()
if err != nil {
// logged already fs.Debug("Failed to read info: %s", err)
return nil
return nil, err
}
}
return o
return o, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) NewFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
fs, _ := f.newFsObjectWithInfo(remote, info)
// Errors have already been logged
return fs
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
return f.NewFsObjectWithInfo(remote, nil)
}
// Strips the root off entry and returns it
@@ -297,14 +290,14 @@ func (f *FsDropbox) list(out fs.ObjectsChan) {
// ignore directories
} else {
path := f.stripRoot(entry)
out <- f.newFsObjectWithInfo(path, entry)
out <- f.NewFsObjectWithInfo(path, entry)
}
}
}
if !deltaPage.HasMore {
break
}
cursor = deltaPage.Cursor.Cursor
cursor = deltaPage.Cursor
}
}
}
@@ -580,9 +573,7 @@ func (o *FsObjectDropbox) remotePath() string {
func metadataKey(path string) string {
// NB File system is case insensitive
path = strings.ToLower(path)
hash := md5.New()
_, _ = hash.Write([]byte(path))
return fmt.Sprintf("%x", hash.Sum(nil))
return fmt.Sprintf("%x", md5.Sum([]byte(path)))
}
// Returns the key for the metadata database
@@ -632,7 +623,7 @@ func (o *FsObjectDropbox) readMetaData() (err error) {
if !ok {
fs.Debug(o, "mtime not a string")
} else {
modTime, err := time.Parse(timeFormatIn, mtime)
modTime, err := time.Parse(RFC3339In, mtime)
if err != nil {
return err
}
@@ -642,7 +633,8 @@ func (o *FsObjectDropbox) readMetaData() (err error) {
}
// Last resort
return o.readEntryAndSetMetadata()
o.readEntryAndSetMetadata()
return nil
}
// ModTime returns the modification time of the object
@@ -674,16 +666,14 @@ func (o *FsObjectDropbox) setModTimeAndMd5sum(modTime time.Time, md5sum string)
if err != nil {
return fmt.Errorf("Couldn't set md5sum record: %s", err)
}
o.md5sum = md5sum
}
if !modTime.IsZero() {
mtime := modTime.Format(timeFormatOut)
mtime := modTime.Format(RFC3339Out)
err := record.Set(mtimeField, mtime)
if err != nil {
return fmt.Errorf("Couldn't set mtime record: %s", err)
}
o.modTime = modTime
}
return nil
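
The FIXME near the top of this file describes the timeout workaround: oauth2 falls back to `http.DefaultTransport` unless the context passed into `.Client()` carries an `*http.Client` under the `oauth2.HTTPClient` key. A sketch of that workaround (hypothetical helper name; not rclone's code):

```
package main

import (
	"net/http"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

// clientWithTimeouts returns an OAuth2-authenticated client whose
// underlying transport has a timeout, instead of the default transport.
func clientWithTimeouts(config *oauth2.Config, token *oauth2.Token) *http.Client {
	base := &http.Client{Timeout: 5 * time.Minute} // client with timeouts set
	ctx := context.WithValue(oauth2.NoContext, oauth2.HTTPClient, base)
	return config.Client(ctx, token)
}

func main() {} // compile check only; real use passes a configured oauth2.Config
```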

dropbox/dropbox_test.go

@@ -1,53 +0,0 @@
// Test Dropbox filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
package dropbox_test
import (
"testing"
"github.com/ncw/rclone/dropbox"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func init() {
fstests.NilObject = fs.Object((*dropbox.FsObjectDropbox)(nil))
fstests.RemoteName = "TestDropbox:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

fs/accounting.go

@@ -10,24 +10,13 @@ import (
"strings"
"sync"
"time"
"github.com/tsenart/tb"
)
// Globals
var (
Stats = NewStats()
tokenBucket *tb.Bucket
Stats = NewStats()
)
// Start the token bucket if necessary
func startTokenBucket() {
if bwLimit > 0 {
tokenBucket = tb.NewBucket(int64(bwLimit), 100*time.Millisecond)
Log(nil, "Starting bandwidth limiter at %vBytes/s", &bwLimit)
}
}
// Stringset holds some strings
type StringSet map[string]bool
@@ -189,10 +178,6 @@ func (file *Account) Read(p []byte) (n int, err error) {
if err == io.EOF {
// FIXME Do something?
}
// Limit the transfer speed if required
if tokenBucket != nil {
tokenBucket.Wait(int64(n))
}
return
}
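
The hunk above removes the `--bwlimit` token bucket: `Account.Read` waited for n tokens after each read, throttling every transfer. A dependency-free sketch of the same idea (illustrative; rclone used github.com/tsenart/tb):

```
// After each Read, sleep until the average rate drops to limit bytes/s.
package main

import (
	"fmt"
	"io"
	"strings"
	"time"
)

type rateLimitedReader struct {
	in    io.Reader
	limit float64   // bytes per second
	start time.Time // when reading began
	total int64     // bytes read so far
}

func (r *rateLimitedReader) Read(p []byte) (int, error) {
	if r.start.IsZero() {
		r.start = time.Now()
	}
	n, err := r.in.Read(p)
	r.total += int64(n)
	// Earliest moment at which r.total bytes are allowed to have passed.
	due := r.start.Add(time.Duration(float64(r.total) / r.limit * float64(time.Second)))
	if d := time.Until(due); d > 0 {
		time.Sleep(d)
	}
	return n, err
}

func main() {
	src := strings.NewReader(strings.Repeat("x", 4096))
	lr := &rateLimitedReader{in: src, limit: 2048} // 2 kBytes/s
	start := time.Now()
	n, _ := io.Copy(io.Discard, lr)
	fmt.Printf("copied %d bytes in %v\n", n, time.Since(start).Round(time.Millisecond))
}
```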

fs/config.go

@@ -6,8 +6,6 @@ import (
"bufio"
"fmt"
"log"
"math"
"net/http"
"os"
"os/user"
"path"
@@ -17,7 +15,6 @@ import (
"time"
"github.com/Unknwon/goconfig"
"github.com/mreiferson/go-httpclient"
"github.com/ogier/pflag"
)
@@ -25,8 +22,6 @@ const (
configFileName = ".rclone.conf"
)
type SizeSuffix int64
// Global
var (
// Config file
@@ -38,147 +33,34 @@ var (
// Global config
Config = &ConfigInfo{}
// Flags
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
timeout = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
bwLimit SizeSuffix
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
)
func init() {
pflag.VarP(&bwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix k|M|G")
}
// Turn SizeSuffix into a string
func (x SizeSuffix) String() string {
scaled := float64(0)
suffix := ""
switch {
case x == 0:
return "0"
case x < 1024*1024:
scaled = float64(x) / 1024
suffix = "k"
case x < 1024*1024*1024:
scaled = float64(x) / 1024 / 1024
suffix = "M"
default:
scaled = float64(x) / 1024 / 1024 / 1024
suffix = "G"
}
if math.Floor(scaled) == scaled {
return fmt.Sprintf("%.0f%s", scaled, suffix)
}
return fmt.Sprintf("%.3f%s", scaled, suffix)
}
// Set a SizeSuffix
func (x *SizeSuffix) Set(s string) error {
if len(s) == 0 {
return fmt.Errorf("Empty string")
}
suffix := s[len(s)-1]
suffixLen := 1
var multiplier float64
switch suffix {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
suffixLen = 0
multiplier = 1 << 10
case 'k', 'K':
multiplier = 1 << 10
case 'm', 'M':
multiplier = 1 << 20
case 'g', 'G':
multiplier = 1 << 30
default:
return fmt.Errorf("Bad suffix %q", suffix)
}
s = s[:len(s)-suffixLen]
value, err := strconv.ParseFloat(s, 64)
if err != nil {
return err
}
if value < 0 {
return fmt.Errorf("Size can't be negative %q", s)
}
value *= multiplier
*x = SizeSuffix(value)
return nil
}
// Check it satisfies the interface
var _ pflag.Value = (*SizeSuffix)(nil)
// Filesystem config options
type ConfigInfo struct {
Verbose bool
Quiet bool
DryRun bool
ModifyWindow time.Duration
Checkers int
Transfers int
ConnectTimeout time.Duration // Connect timeout
Timeout time.Duration // Data channel timeout
}
// Transport returns an http.RoundTripper with the correct timeouts
func (ci *ConfigInfo) Transport() http.RoundTripper {
return &httpclient.Transport{
Proxy: http.ProxyFromEnvironment,
MaxIdleConnsPerHost: ci.Checkers + ci.Transfers + 1,
// ConnectTimeout, if non-zero, is the maximum amount of time a dial will wait for
// a connect to complete.
ConnectTimeout: ci.ConnectTimeout,
// ResponseHeaderTimeout, if non-zero, specifies the amount of
// time to wait for a server's response headers after fully
// writing the request (including its body, if any). This
// time does not include the time to read the response body.
ResponseHeaderTimeout: ci.Timeout,
// RequestTimeout, if non-zero, specifies the amount of time for the entire
// request to complete (including all of the above timeouts + entire response body).
// This should never be less than the sum total of the above two timeouts.
//RequestTimeout: NOT SET,
// ReadWriteTimeout, if non-zero, will set a deadline for every Read and
// Write operation on the request connection.
ReadWriteTimeout: ci.Timeout,
}
}
// Transport returns an http.Client with the correct timeouts
func (ci *ConfigInfo) Client() *http.Client {
return &http.Client{
Transport: ci.Transport(),
}
Verbose bool
Quiet bool
DryRun bool
ModifyWindow time.Duration
Checkers int
Transfers int
}
// Find the config directory
func configHome() string {
// Find users home directory
usr, err := user.Current()
if err == nil {
return usr.HomeDir
if err != nil {
log.Printf("Couldn't find home directory: %v", err)
return ""
}
// Fall back to reading $HOME - work around user.Current() not
// working for cross compiled binaries on OSX.
// https://github.com/golang/go/issues/6376
home := os.Getenv("HOME")
if home != "" {
return home
}
log.Printf("Couldn't find home directory or read HOME environment variable.")
log.Printf("Defaulting to storing config in current directory.")
log.Printf("Use -config flag to workaround.")
log.Printf("Error was: %v", err)
return ""
return usr.HomeDir
}
// Loads the config file
@@ -192,8 +74,6 @@ func LoadConfig() {
Config.Checkers = *checkers
Config.Transfers = *transfers
Config.DryRun = *dryRun
Config.Timeout = *timeout
Config.ConnectTimeout = *connectTimeout
ConfigPath = *configFile
@@ -207,9 +87,6 @@ func LoadConfig() {
log.Fatalf("Failed to read null config file: %v", err)
}
}
// Start the token bucket limiter
startTokenBucket()
}
// Save configuration file.
@@ -443,20 +320,9 @@ func EditConfig() {
name := ChooseRemote()
EditRemote(name)
case 'n':
nameLoop:
for {
fmt.Printf("name> ")
name := ReadLine()
switch {
case name == "":
fmt.Printf("Can't use empty name\n")
case isDriveLetter(name):
fmt.Printf("Can't use %q as it can be confused a drive letter\n", name)
default:
NewRemote(name)
break nameLoop
}
}
fmt.Printf("name> ")
name := ReadLine()
NewRemote(name)
case 'd':
name := ChooseRemote()
DeleteRemote(name)


@@ -1,57 +0,0 @@
package fs
import "testing"
func TestSizeSuffixString(t *testing.T) {
for _, test := range []struct {
in float64
want string
}{
{0, "0"},
{102, "0.100k"},
{1024, "1k"},
{1024 * 1024, "1M"},
{1024 * 1024 * 1024, "1G"},
{10 * 1024 * 1024 * 1024, "10G"},
{10.1 * 1024 * 1024 * 1024, "10.100G"},
} {
ss := SizeSuffix(test.in)
got := ss.String()
if test.want != got {
t.Errorf("Want %v got %v", test.want, got)
}
}
}
func TestSizeSuffixSet(t *testing.T) {
for i, test := range []struct {
in string
want int64
err bool
}{
{"0", 0, false},
{"0.1k", 102, false},
{"0.1", 102, false},
{"1K", 1024, false},
{"1", 1024, false},
{"2.5", 1024 * 2.5, false},
{"1M", 1024 * 1024, false},
{"1.g", 1024 * 1024 * 1024, false},
{"10G", 10 * 1024 * 1024 * 1024, false},
{"", 0, true},
{"1p", 0, true},
{"1.p", 0, true},
{"1p", 0, true},
{"-1K", 0, true},
} {
ss := SizeSuffix(0)
err := ss.Set(test.in)
if (err != nil) != test.err {
t.Errorf("%d: Expecting error %v but got error %v", i, test.err, err)
}
got := int64(ss)
if test.want != got {
t.Errorf("%d: Want %v got %v", i, test.want, got)
}
}
}


@@ -1,12 +0,0 @@
// +build !windows
package fs
// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
//
// On non windows platforms we don't have drive letters so we always
// return false
func isDriveLetter(name string) bool {
return false
}


@@ -1,13 +0,0 @@
// +build windows
package fs
// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
func isDriveLetter(name string) bool {
if len(name) != 1 {
return false
}
c := name[0]
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}


@@ -6,7 +6,6 @@ import (
"fmt"
"io"
"log"
"path/filepath"
"regexp"
"time"
)
@@ -21,8 +20,6 @@ const (
var (
// Filesystem registry
fsRegistry []*FsInfo
// Error returned by NewFs if not found in config file
NotFoundInConfigFile = fmt.Errorf("Didn't find section in config file")
)
// Filesystem info
@@ -82,13 +79,9 @@ type Fs interface {
Put(in io.Reader, remote string, modTime time.Time, size int64) (Object, error)
// Make the directory (container, bucket)
//
// Shouldn't return an error if it already exists
Mkdir() error
// Remove the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
Rmdir() error
// Precision of the ModTimes in this Fs
@@ -138,40 +131,9 @@ type Purger interface {
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
Purge() error
}
// An optional interface for error as to whether the operation should be retried
//
// This should be returned from Update or Put methods as required
type Retry interface {
error
Retry() bool
}
// A type of error
type retryError string
// Error interface
func (r retryError) Error() string {
return string(r)
}
// Retry interface
func (r retryError) Retry() bool {
return true
}
// Check interface
var _ Retry = retryError("")
// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...interface{}) error {
return retryError(fmt.Sprintf(format, a...))
}
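// A standalone sketch (not rclone code) of how a backend could signal a
// retryable failure through the Retry interface above, and how a caller such
// as Copy can loop on it; doUpload and its error text are illustrative.
package main

import "fmt"

type Retry interface {
	error
	Retry() bool
}

type retryError string

func (r retryError) Error() string { return string(r) }
func (r retryError) Retry() bool   { return true }

// doUpload stands in for Put/Update: it fails twice, then succeeds.
func doUpload(attempt int) error {
	if attempt < 2 {
		return retryError("temporary server error")
	}
	return nil
}

func main() {
	const maxTries = 10
	for tries := 0; tries < maxTries; tries++ {
		err := doUpload(tries)
		if r, ok := err.(Retry); ok && r.Retry() {
			fmt.Printf("retrying after error: %v (%d/%d)\n", err, tries+1, maxTries)
			continue
		}
		fmt.Println("finished with err =", err)
		break
	}
}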
// A channel of Objects
type ObjectsChan chan Object
@@ -197,6 +159,9 @@ type Dir struct {
// A channel of Dir objects
type DirChan chan *Dir
// Pattern to match a url
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)
// Finds a FsInfo object for the name passed in
//
// Services are looked up in the config file
@@ -209,43 +174,34 @@ func Find(name string) (*FsInfo, error) {
return nil, fmt.Errorf("Didn't find filing system for %q", name)
}
// Pattern to match an rclone url
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
// The path is of the form service://path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
// Services are looked up in the config file
func NewFs(path string) (Fs, error) {
parts := matcher.FindStringSubmatch(path)
fsName, configName, fsPath := "local", "local", path
if parts != nil && !isDriveLetter(parts[1]) {
if parts != nil {
configName, fsPath = parts[1], parts[2]
var err error
fsName, err = ConfigFile.GetValue(configName, "type")
if err != nil {
return nil, NotFoundInConfigFile
return nil, fmt.Errorf("Didn't find section in config file for %q", configName)
}
}
fs, err := Find(fsName)
if err != nil {
return nil, err
}
// change native directory separators to / if there are any
fsPath = filepath.ToSlash(fsPath)
return fs.NewFs(configName, fsPath)
}
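// A standalone sketch of how the matcher above splits paths, and why a
// single-letter remote clashes with a Windows drive letter; the sample
// paths are illustrative.
package main

import (
	"fmt"
	"regexp"
)

var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)

func main() {
	for _, p := range []string{"s3:bucket/dir", "drive:", "/local/path", `C:\Users\me`} {
		parts := matcher.FindStringSubmatch(p)
		if parts == nil {
			fmt.Printf("%-15q -> local path\n", p)
			continue
		}
		// Note `C:\Users\me` matches too - hence the isDriveLetter check above.
		fmt.Printf("%-15q -> remote %q, path %q\n", p, parts[1], parts[2])
	}
}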
// Outputs log for object
func OutputLog(o interface{}, text string, args ...interface{}) {
description := ""
if o != nil {
description = fmt.Sprintf("%v: ", o)
if x, ok := o.(fmt.Stringer); ok {
description = x.String() + ": "
}
out := fmt.Sprintf(text, args...)
log.Print(description + out)


@@ -4,9 +4,7 @@ package fs
import (
"fmt"
"io"
"mime"
"path"
"log"
"sync"
)
@@ -99,29 +97,16 @@ func Equal(src, dst Object) bool {
return true
}
// Returns a guess at the mime type from the extension
func MimeType(o Object) string {
mimeType := mime.TypeByExtension(path.Ext(o.Remote()))
if mimeType == "" {
mimeType = "application/octet-stream"
}
return mimeType
}
// Used to remove a failed copy
//
// Returns whether the file was successfully removed or not
func removeFailedCopy(dst Object) bool {
if dst == nil {
return false
func removeFailedCopy(dst Object) {
if dst != nil {
Debug(dst, "Removing failed copy")
removeErr := dst.Remove()
if removeErr != nil {
Stats.Error()
Log(dst, "Failed to remove failed copy: %s", removeErr)
}
}
Debug(dst, "Removing failed copy")
removeErr := dst.Remove()
if removeErr != nil {
Debug(dst, "Failed to remove failed copy: %s", removeErr)
return false
}
return true
}
// Copy src object to dst or f if nil
@@ -130,10 +115,6 @@ func removeFailedCopy(dst Object) bool {
// call Copy() with dst nil on a pre-existing file then some filing
// systems (eg Drive) may duplicate the file.
func Copy(f Fs, dst, src Object) {
const maxTries = 10
tries := 0
doUpdate := dst != nil
tryAgain:
in0, err := src.Open()
if err != nil {
Stats.Error()
@@ -143,7 +124,7 @@ tryAgain:
in := NewAccount(in0) // account the transfer
var actionTaken string
if doUpdate {
if dst != nil {
actionTaken = "Copied (updated existing)"
err = dst.Update(in, src.ModTime(), src.Size())
} else {
@@ -151,17 +132,6 @@ tryAgain:
dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
}
inErr := in.Close()
// Retry if err returned a retry error
if r, ok := err.(Retry); ok && r.Retry() && tries < maxTries {
tries++
Log(src, "Received error: %v - retrying %d/%d", err, tries, maxTries)
if removeFailedCopy(dst) {
// If we removed dst, then nil it out and note we are not updating
dst = nil
doUpdate = false
}
goto tryAgain
}
if err == nil {
err = inErr
}
@@ -255,10 +225,12 @@ func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
func DeleteFiles(to_be_deleted ObjectsChan) {
var wg sync.WaitGroup
wg.Add(Config.Transfers)
var fs Fs
for i := 0; i < Config.Transfers; i++ {
go func() {
defer wg.Done()
for dst := range to_be_deleted {
fs = dst.Fs()
if Config.DryRun {
Debug(dst, "Not deleting as --dry-run")
} else {
@@ -275,22 +247,9 @@ func DeleteFiles(to_be_deleted ObjectsChan) {
}
}()
}
Log(nil, "Waiting for deletions to finish")
wg.Wait()
}
// Read a map of Object.Remote to Object for the given Fs
func readFilesMap(fs Fs) map[string]Object {
files := make(map[string]Object)
for o := range fs.List() {
remote := o.Remote()
if _, ok := files[remote]; !ok {
files[remote] = o
} else {
Log(o, "Duplicate file detected")
}
}
return files
Log(fs, "Waiting for deletions to finish")
wg.Wait()
}
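// A standalone sketch (not rclone code) of the worker-pool shape DeleteFiles
// uses: Transfers goroutines drain one channel and a WaitGroup holds the
// caller until the channel is closed and fully consumed.
package main

import (
	"fmt"
	"sync"
)

func main() {
	const transfers = 4 // stands in for Config.Transfers
	work := make(chan string, transfers)
	var wg sync.WaitGroup
	wg.Add(transfers)
	for i := 0; i < transfers; i++ {
		go func() {
			defer wg.Done()
			for name := range work {
				fmt.Println("deleting", name) // stand-in for dst.Remove()
			}
		}()
	}
	for _, name := range []string{"a", "b", "c", "d", "e"} {
		work <- name
	}
	close(work)
	wg.Wait()
}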
// Syncs fsrc into fdst
@@ -307,7 +266,10 @@ func Sync(fdst, fsrc Fs, Delete bool) error {
// Read the destination files first
// FIXME could do this in parallel and make it use less memory
delFiles := readFilesMap(fdst)
delFiles := make(map[string]Object)
for dst := range fdst.List() {
delFiles[dst.Remote()] = dst
}
// Read source files checking them off against dest files
to_be_checked := make(ObjectPairChan, Config.Transfers)
@@ -372,20 +334,22 @@ func Check(fdst, fsrc Fs) error {
// Read the destination files first
// FIXME could do this in parallel and make it use less memory
dstFiles := readFilesMap(fdst)
dstFiles := make(map[string]Object)
for dst := range fdst.List() {
dstFiles[dst.Remote()] = dst
}
// Read the source files checking them against dstFiles
// FIXME could do this in parallel and make it use less memory
srcFiles := readFilesMap(fsrc)
// Move all the common files into commonFiles and delete then
// from srcFiles and dstFiles
srcFiles := make(map[string]Object)
commonFiles := make(map[string][]Object)
for remote, src := range srcFiles {
for src := range fsrc.List() {
remote := src.Remote()
if dst, ok := dstFiles[remote]; ok {
commonFiles[remote] = []Object{dst, src}
delete(srcFiles, remote)
delete(dstFiles, remote)
} else {
srcFiles[remote] = src
}
}
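// A standalone sketch of the map-based three-way split Check performs above:
// names only in src, only in dst, and common to both; the sample sets are
// illustrative.
package main

import "fmt"

func main() {
	src := map[string]bool{"a": true, "b": true, "c": true}
	dst := map[string]bool{"b": true, "c": true, "d": true}
	var common []string
	for name := range src {
		if dst[name] {
			common = append(common, name)
			delete(src, name) // deleting the current key while ranging is safe
			delete(dst, name)
		}
	}
	fmt.Println("src only:", src)  // map[a:true]
	fmt.Println("dst only:", dst)  // map[d:true]
	fmt.Println("common:", common) // [b c] in either order
}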
@@ -465,24 +429,14 @@ func ListFn(f Fs, fn func(Object)) error {
return nil
}
// mutex for synchronized output
var outMutex sync.Mutex
// Synchronized fmt.Fprintf
func syncFprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
outMutex.Lock()
defer outMutex.Unlock()
return fmt.Fprintf(w, format, a...)
}
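// A standalone sketch showing why syncFprintf exists: listing runs in
// parallel, and without the mutex output from different goroutines could
// interleave mid-line; the fake object lines are illustrative.
package main

import (
	"fmt"
	"io"
	"os"
	"sync"
)

var outMutex sync.Mutex

// syncFprintf serialises output so lines from parallel listers can't interleave.
func syncFprintf(w io.Writer, format string, a ...interface{}) (int, error) {
	outMutex.Lock()
	defer outMutex.Unlock()
	return fmt.Fprintf(w, format, a...)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			syncFprintf(os.Stdout, "%9d object-%d\n", n*100, n) // order varies
		}(i)
	}
	wg.Wait()
}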
// List the Fs to stdout
//
// Shows size and path
//
// Lists in parallel which may get them out of order
func List(f Fs, w io.Writer) error {
func List(f Fs) error {
return ListFn(f, func(o Object) {
syncFprintf(w, "%9d %s\n", o.Size(), o.Remote())
fmt.Printf("%9d %s\n", o.Size(), o.Remote())
})
}
@@ -491,12 +445,12 @@ func List(f Fs, w io.Writer) error {
// Shows size, mod time and path
//
// Lists in parallel which may get them out of order
func ListLong(f Fs, w io.Writer) error {
func ListLong(f Fs) error {
return ListFn(f, func(o Object) {
Stats.Checking(o)
modTime := o.ModTime()
Stats.DoneChecking(o)
syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Format("2006-01-02 15:04:05.000000000"), o.Remote())
fmt.Printf("%9d %19s %s\n", o.Size(), modTime.Format("2006-01-02 15:04:05.00000000"), o.Remote())
})
}
@@ -505,7 +459,7 @@ func ListLong(f Fs, w io.Writer) error {
// Produces the same output as the md5sum command
//
// Lists in parallel which may get them out of order
func Md5sum(f Fs, w io.Writer) error {
func Md5sum(f Fs) error {
return ListFn(f, func(o Object) {
Stats.Checking(o)
md5sum, err := o.Md5sum()
@@ -514,14 +468,14 @@ func Md5sum(f Fs, w io.Writer) error {
Debug(o, "Failed to read MD5: %v", err)
md5sum = "UNKNOWN"
}
syncFprintf(w, "%32s %s\n", md5sum, o.Remote())
fmt.Printf("%32s %s\n", md5sum, o.Remote())
})
}
// List the directories/buckets/containers in the Fs to stdout
func ListDir(f Fs, w io.Writer) error {
func ListDir(f Fs) error {
for dir := range f.ListDir() {
syncFprintf(w, "%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
fmt.Printf("%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
}
return nil
}
@@ -554,21 +508,20 @@ func Rmdir(f Fs) error {
//
// FIXME doesn't delete local directories
func Purge(f Fs) error {
var err error
if purger, ok := f.(Purger); ok {
if Config.DryRun {
Debug(f, "Not purging as --dry-run set")
} else {
err = purger.Purge()
err := purger.Purge()
if err != nil {
Stats.Error()
return err
}
}
} else {
// DeleteFiles and Rmdir observe --dry-run
DeleteFiles(f.List())
err = Rmdir(f)
}
if err != nil {
Stats.Error()
return err
log.Printf("Deleting path")
Rmdir(f)
}
return nil
}
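// A standalone sketch (not rclone code) of the optional-interface pattern
// Purge relies on: upgrade the Fs to Purger when the backend provides a bulk
// delete, otherwise fall back to deleting files one by one; all names here
// are illustrative.
package main

import "fmt"

type Fs interface{ Name() string }

// Purger is the optional upgrade: an Fs that can bulk-delete its contents.
type Purger interface {
	Fs
	Purge() error
}

type fastFs struct{}

func (fastFs) Name() string { return "fast" }
func (fastFs) Purge() error { fmt.Println("bulk delete"); return nil }

type slowFs struct{}

func (slowFs) Name() string { return "slow" }

func purge(f Fs) {
	if p, ok := f.(Purger); ok {
		_ = p.Purge() // backend has a quick path
		return
	}
	fmt.Println("delete files one by one, then rmdir") // generic fallback
}

func main() {
	purge(fastFs{})
	purge(slowFs{})
}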


@@ -1,336 +0,0 @@
// Test rclone by doing real transactions to a storage provider to and
// from the local disk
package fs_test
import (
"bytes"
"flag"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest"
// Active file systems
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
)
// Globals
var (
localName, remoteName string
flocal, fremote fs.Fs
RemoteName = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
SubDir = flag.Bool("subdir", false, "Set to test with a sub directory")
Verbose = flag.Bool("verbose", false, "Set to enable logging")
finalise func()
)
// Write a file
func WriteFile(filePath, content string, t time.Time) {
// FIXME make directories?
filePath = path.Join(localName, filePath)
dirPath := path.Dir(filePath)
err := os.MkdirAll(dirPath, 0770)
if err != nil {
log.Fatalf("Failed to make directories %q: %v", dirPath, err)
}
err = ioutil.WriteFile(filePath, []byte(content), 0600)
if err != nil {
log.Fatalf("Failed to write file %q: %v", filePath, err)
}
err = os.Chtimes(filePath, t, t)
if err != nil {
log.Fatalf("Failed to chtimes file %q: %v", filePath, err)
}
}
var t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
var t2 = fstest.Time("2011-12-25T12:59:59.123456789Z")
var t3 = fstest.Time("2011-12-30T12:59:59.000000000Z")
func TestInit(t *testing.T) {
fs.LoadConfig()
fs.Config.Verbose = *Verbose
fs.Config.Quiet = !*Verbose
var err error
fremote, finalise, err = fstest.RandomRemote(*RemoteName, *SubDir)
if err != nil {
t.Fatalf("Failed to open remote %q: %v", *RemoteName, err)
}
t.Logf("Testing with remote %v", fremote)
localName, err = ioutil.TempDir("", "rclone")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
localName = filepath.ToSlash(localName)
t.Logf("Testing with local %q", localName)
flocal, err = fs.NewFs(localName)
if err != nil {
t.Fatalf("Failed to make %q: %v", remoteName, err)
}
}
func TestCalculateModifyWindow(t *testing.T) {
fs.CalculateModifyWindow(fremote, flocal)
t.Logf("ModifyWindow is %q", fs.Config.ModifyWindow)
}
func TestMkdir(t *testing.T) {
fstest.TestMkdir(t, fremote)
}
// Check dry run is working
func TestCopyWithDryRun(t *testing.T) {
WriteFile("sub dir/hello world", "hello world", t1)
fs.Config.DryRun = true
err := fs.Sync(fremote, flocal, false)
fs.Config.DryRun = false
if err != nil {
t.Fatalf("Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, []fstest.Item{}, fs.Config.ModifyWindow)
}
// Now without dry run
func TestCopy(t *testing.T) {
err := fs.Sync(fremote, flocal, false)
if err != nil {
t.Fatalf("Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestLsd(t *testing.T) {
var buf bytes.Buffer
err := fs.ListDir(fremote, &buf)
if err != nil {
t.Fatalf("ListDir failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, "sub dir\n") {
t.Fatalf("Result wrong %q", res)
}
}
// Now delete the local file and download it
func TestCopyAfterDelete(t *testing.T) {
err := os.Remove(localName + "/sub dir/hello world")
if err != nil {
t.Fatalf("Remove failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, []fstest.Item{}, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestCopyRedownload(t *testing.T) {
err := fs.Sync(flocal, fremote, false)
if err != nil {
t.Fatalf("Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
// Clean the directory
cleanTempDir(t)
}
func TestSyncAfterChangingModtimeOnly(t *testing.T) {
WriteFile("empty space", "", t1)
err := os.Chtimes(localName+"/empty space", t2, t2)
if err != nil {
t.Fatalf("Chtimes failed: %v", err)
}
err = fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestSyncAfterAddingAFile(t *testing.T) {
WriteFile("potato", "------------------------------------------------------------", t3)
err := fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
WriteFile("potato", "smaller but same date", t3)
err := fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Sync after changing a file's contents, modtime but not length
func TestSyncAfterChangingContentsOnly(t *testing.T) {
WriteFile("potato", "SMALLER BUT SAME DATE", t2)
err := fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Sync after removing a file and adding a file --dry-run
func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
WriteFile("potato2", "------------------------------------------------------------", t1)
err := os.Remove(localName + "/potato")
if err != nil {
t.Fatalf("Remove failed: %v", err)
}
fs.Config.DryRun = true
err = fs.Sync(fremote, flocal, true)
fs.Config.DryRun = false
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
before := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, before, fs.Config.ModifyWindow)
}
// Sync after removing a file and adding a file
func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
err := fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestLs(t *testing.T) {
var buf bytes.Buffer
err := fs.List(fremote, &buf)
if err != nil {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, " 0 empty space\n") {
t.Errorf("empty space missing: %q", res)
}
if !strings.Contains(res, " 60 potato2\n") {
t.Errorf("potato2 missing: %q", res)
}
}
func TestLsLong(t *testing.T) {
var buf bytes.Buffer
err := fs.ListLong(fremote, &buf)
if err != nil {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
m1 := regexp.MustCompile(`(?m)^ 0 2011-12-25 12:59:59\.\d{9} empty space$`)
if !m1.MatchString(res) {
t.Errorf("empty space missing: %q", res)
}
m2 := regexp.MustCompile(`(?m)^ 60 2001-02-03 04:05:06\.\d{9} potato2$`)
if !m2.MatchString(res) {
t.Errorf("potato2 missing: %q", res)
}
}
func TestMd5sum(t *testing.T) {
var buf bytes.Buffer
err := fs.Md5sum(fremote, &buf)
if err != nil {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") {
t.Errorf("empty space missing: %q", res)
}
if !strings.Contains(res, "6548b156ea68a4e003e786df99eee76 potato2\n") {
t.Errorf("potato2 missing: %q", res)
}
}
func TestCheck(t *testing.T) {
}
// Clean the temporary directory
func cleanTempDir(t *testing.T) {
t.Logf("Cleaning temporary directory: %q", localName)
err := os.RemoveAll(localName)
if err != nil {
t.Logf("Failed to remove %q: %v", localName, err)
}
}
func TestFinalise(t *testing.T) {
finalise()
cleanTempDir(t)
}


@@ -1,29 +0,0 @@
#!/bin/bash
go install
REMOTES="
TestSwift:
TestS3:
TestDrive:
TestGoogleCloudStorage:
TestDropbox:
"
function test_remote {
args=$@
echo "@go test $args"
go test $args || {
echo "*** test $args FAILED ***"
exit 1
}
}
test_remote
test_remote --subdir
for remote in $REMOTES; do
test_remote --remote $remote
test_remote --remote $remote --subdir
done
echo "All OK"


@@ -1,3 +1,3 @@
package fs
const Version = "v1.13"
const Version = "v1.02"


@@ -1,232 +0,0 @@
// Utilities for testing the fs
package fstest
// FIXME put name of test FS in Fs structure
import (
"io/ioutil"
"log"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/ncw/rclone/fs"
)
// Seed the random number generator
func init() {
rand.Seed(time.Now().UnixNano())
}
// Represents an item for checking
type Item struct {
Path string
Md5sum string
ModTime time.Time
Size int64
}
// check the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
dt := modTime.Sub(i.ModTime)
if dt >= precision || dt <= -precision {
t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision)
}
}
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
if obj == nil {
t.Fatalf("Object is nil")
}
// Check attributes
Md5sum, err := obj.Md5sum()
if err != nil {
t.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
}
if i.Md5sum != Md5sum {
t.Errorf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
}
if i.Size != obj.Size() {
t.Errorf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
}
i.CheckModTime(t, obj, obj.ModTime(), precision)
}
// Represents all items for checking
type Items struct {
byName map[string]*Item
items []Item
}
// Make an Items
func NewItems(items []Item) *Items {
is := &Items{
byName: make(map[string]*Item),
items: items,
}
// Fill up byName
for i := range items {
is.byName[items[i].Path] = &items[i]
}
return is
}
// Check off an item
func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
i, ok := is.byName[obj.Remote()]
if !ok {
t.Errorf("Unexpected file %q", obj.Remote())
return
}
delete(is.byName, obj.Remote())
i.Check(t, obj, precision)
}
// Check all done
func (is *Items) Done(t *testing.T) {
if len(is.byName) != 0 {
for name := range is.byName {
log.Printf("Not found %q", name)
}
t.Errorf("%d objects not found", len(is.byName))
}
}
// Checks the fs to see if it has the expected contents
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision time.Duration) {
is := NewItems(items)
for obj := range f.List() {
if obj == nil {
t.Errorf("Unexpected nil in List()")
continue
}
is.Find(t, obj, precision)
}
is.Done(t)
}
// Checks the fs to see if it has the expected contents
func CheckListing(t *testing.T, f fs.Fs, items []Item) {
precision := f.Precision()
CheckListingWithPrecision(t, f, items, precision)
}
// Parse a time string or explode
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// Create a random string
func RandomString(n int) string {
source := "abcdefghijklmnopqrstuvwxyz0123456789"
out := make([]byte, n)
for i := range out {
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// Creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) {
path, err = ioutil.TempDir("", "rclone")
if err == nil {
// Now remove the directory
err = os.Remove(path)
}
path = filepath.ToSlash(path)
return
}
// Make a random bucket or subdirectory name
//
// Returns a random remote name plus the leaf name
func RandomRemoteName(remoteName string) (string, string, error) {
var err error
var leafName string
// Make a directory if remote name is null
if remoteName == "" {
remoteName, err = LocalRemote()
if err != nil {
return "", "", err
}
} else {
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
leafName = RandomString(32)
remoteName += leafName
}
return remoteName, leafName, nil
}
// Make a random bucket or subdirectory on the remote
//
// Call the finalise function returned to Purge the fs at the end (and
// the parent if necessary)
func RandomRemote(remoteName string, subdir bool) (fs.Fs, func(), error) {
var err error
var parentRemote fs.Fs
remoteName, _, err = RandomRemoteName(remoteName)
if err != nil {
return nil, nil, err
}
if subdir {
parentRemote, err = fs.NewFs(remoteName)
if err != nil {
return nil, nil, err
}
remoteName += "/" + RandomString(8)
}
remote, err := fs.NewFs(remoteName)
if err != nil {
return nil, nil, err
}
finalise := func() {
_ = fs.Purge(remote) // ignore error
if parentRemote != nil {
err = fs.Purge(parentRemote) // ignore error
if err != nil {
log.Printf("Failed to purge %v: %v", parentRemote, err)
}
}
}
return remote, finalise, nil
}
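// A standalone sketch of the finalise-closure pattern above: setup returns a
// cleanup func which callers run (typically via defer) when the test is done;
// randomRemote and its output are illustrative.
package main

import "fmt"

func randomRemote() (name string, finalise func()) {
	name = "remote-xyz" // illustrative
	finalise = func() { fmt.Println("purging", name) }
	return name, finalise
}

func main() {
	name, finalise := randomRemote()
	defer finalise()
	fmt.Println("testing with", name)
}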
func TestMkdir(t *testing.T, remote fs.Fs) {
err := fs.Mkdir(remote)
if err != nil {
t.Fatalf("Mkdir failed: %v", err)
}
CheckListing(t, remote, []Item{})
}
func TestPurge(t *testing.T, remote fs.Fs) {
err := fs.Purge(remote)
if err != nil {
t.Fatalf("Purge failed: %v", err)
}
CheckListing(t, remote, []Item{})
}
func TestRmdir(t *testing.T, remote fs.Fs) {
err := fs.Rmdir(remote)
if err != nil {
t.Fatalf("Rmdir failed: %v", err)
}
}


@@ -1,438 +0,0 @@
// Generic tests for testing the Fs and Object interfaces
package fstests
import (
"bytes"
"crypto/md5"
"encoding/hex"
"io"
"log"
"os"
"strings"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest"
)
var (
remote fs.Fs
RemoteName = ""
subRemoteName = ""
subRemoteLeaf = ""
NilObject fs.Object
file1 = fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: "file name.txt",
}
file2 = fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`,
}
)
func TestInit(t *testing.T) {
var err error
fs.LoadConfig()
fs.Config.Verbose = false
fs.Config.Quiet = true
if RemoteName == "" {
RemoteName, err = fstest.LocalRemote()
if err != nil {
log.Fatalf("Failed to create tmp dir: %v", err)
}
}
subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(RemoteName)
if err != nil {
t.Fatalf("Couldn't make remote name: %v", err)
}
remote, err = fs.NewFs(subRemoteName)
if err == fs.NotFoundInConfigFile {
log.Printf("Didn't find %q in config file - skipping tests", RemoteName)
return
}
if err != nil {
t.Fatalf("Couldn't start FS: %v", err)
}
fstest.TestMkdir(t, remote)
}
func skipIfNotOk(t *testing.T) {
if remote == nil {
t.Skip("FS not configured")
}
}
// String returns a description of the FS
func TestFsString(t *testing.T) {
skipIfNotOk(t)
str := remote.String()
if str == "" {
t.Fatal("Bad fs.String()")
}
}
type TestFile struct {
ModTime time.Time
Path string
Size int64
Md5sum string
}
func TestFsRmdirEmpty(t *testing.T) {
skipIfNotOk(t)
fstest.TestRmdir(t, remote)
}
func TestFsRmdirNotFound(t *testing.T) {
skipIfNotOk(t)
err := remote.Rmdir()
if err == nil {
t.Fatalf("Expecting error on Rmdir non existent")
}
}
func TestFsMkdir(t *testing.T) {
skipIfNotOk(t)
fstest.TestMkdir(t, remote)
fstest.TestMkdir(t, remote)
}
func TestFsListEmpty(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{})
}
func TestFsListDirEmpty(t *testing.T) {
skipIfNotOk(t)
for obj := range remote.ListDir() {
t.Errorf("Found unexpected item %q", obj.Name)
}
}
func TestFsNewFsObjectNotFound(t *testing.T) {
skipIfNotOk(t)
if remote.NewFsObject("potato") != nil {
t.Fatal("Didn't expect to find object")
}
}
func findObject(t *testing.T, Name string) fs.Object {
obj := remote.NewFsObject(Name)
if obj == nil {
t.Fatalf("Object not found: %q", Name)
}
return obj
}
func testPut(t *testing.T, file *fstest.Item) {
buf := bytes.NewBufferString(fstest.RandomString(100))
hash := md5.New()
in := io.TeeReader(buf, hash)
file.Size = int64(buf.Len())
obj, err := remote.Put(in, file.Path, file.ModTime, file.Size)
if err != nil {
t.Fatal("Put error", err)
}
file.Md5sum = hex.EncodeToString(hash.Sum(nil))
file.Check(t, obj, remote.Precision())
// Re-read the object and check again
obj = findObject(t, file.Path)
file.Check(t, obj, remote.Precision())
}
func TestFsPutFile1(t *testing.T) {
skipIfNotOk(t)
testPut(t, &file1)
}
func TestFsPutFile2(t *testing.T) {
skipIfNotOk(t)
testPut(t, &file2)
}
func TestFsListDirFile2(t *testing.T) {
skipIfNotOk(t)
found := false
for obj := range remote.ListDir() {
if obj.Name != `hello? sausage` {
t.Errorf("Found unexpected item %q", obj.Name)
} else {
found = true
}
}
if !found {
t.Errorf("Didn't find %q", `hello? sausage`)
}
}
func TestFsListDirRoot(t *testing.T) {
skipIfNotOk(t)
rootRemote, err := fs.NewFs(RemoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", RemoteName, err)
}
found := false
for obj := range rootRemote.ListDir() {
if obj.Name == subRemoteLeaf {
found = true
}
}
if !found {
t.Errorf("Didn't find %q", subRemoteLeaf)
}
}
func TestFsListRoot(t *testing.T) {
skipIfNotOk(t)
rootRemote, err := fs.NewFs(RemoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", RemoteName, err)
}
// Should either find file1 and file2 or nothing
found1 := false
file1 := subRemoteLeaf + "/" + file1.Path
found2 := false
file2 := subRemoteLeaf + "/" + file2.Path
count := 0
errors := fs.Stats.GetErrors()
for obj := range rootRemote.List() {
count++
if obj.Remote() == file1 {
found1 = true
}
if obj.Remote() == file2 {
found2 = true
}
}
errors -= fs.Stats.GetErrors()
if count == 0 {
if errors == 0 {
t.Error("Expecting error if count==0")
}
return
}
if found1 && found2 {
if errors != 0 {
t.Error("Not expecting error if found")
}
return
}
t.Errorf("Didn't find %q (%v) and %q (%v) or no files (count %d)", file1, found1, file2, found2, count)
}
func TestFsListFile1(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}
func TestFsNewFsObject(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
file1.Check(t, obj, remote.Precision())
}
func TestFsListFile1and2(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}
func TestFsRmdirFull(t *testing.T) {
skipIfNotOk(t)
err := remote.Rmdir()
if err == nil {
t.Fatalf("Expecting error on RMdir on non empty remote")
}
}
func TestFsPrecision(t *testing.T) {
skipIfNotOk(t)
precision := remote.Precision()
if precision > time.Second || precision < 0 {
t.Fatalf("Precision out of range %v", precision)
}
// FIXME check expected precision
}
func TestObjectString(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
s := obj.String()
if s != file1.Path {
t.Errorf("String() wrong %v != %v", s, file1.Path)
}
obj = NilObject
s = obj.String()
if s != "<nil>" {
t.Errorf("String() wrong %v != %v", s, "<nil>")
}
}
func TestObjectFs(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if obj.Fs() != remote {
t.Errorf("Fs is wrong %v != %v", obj.Fs(), remote)
}
}
func TestObjectRemote(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if obj.Remote() != file1.Path {
t.Errorf("Remote is wrong %v != %v", obj.Remote(), file1.Path)
}
}
func TestObjectMd5sum(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
Md5sum, err := obj.Md5sum()
if err != nil {
t.Errorf("Error in Md5sum: %v", err)
}
if Md5sum != file1.Md5sum {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}
func TestObjectModTime(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
}
func TestObjectSetModTime(t *testing.T) {
skipIfNotOk(t)
newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z")
obj := findObject(t, file1.Path)
obj.SetModTime(newModTime)
file1.ModTime = newModTime
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
// And make a new object and read it from there too
TestObjectModTime(t)
}
func TestObjectSize(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if obj.Size() != file1.Size {
t.Errorf("Size is wrong %v != %v", obj.Size(), file1.Size)
}
}
func TestObjectOpen(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
in, err := obj.Open()
if err != nil {
t.Fatalf("Open() return error: %v", err)
}
hash := md5.New()
n, err := io.Copy(hash, in)
if err != nil {
t.Fatalf("io.Copy() return error: %v", err)
}
if n != file1.Size {
t.Fatalf("Read wrong number of bytes %d != %d", n, file1.Size)
}
err = in.Close()
if err != nil {
t.Fatalf("in.Close() return error: %v", err)
}
Md5sum := hex.EncodeToString(hash.Sum(nil))
if Md5sum != file1.Md5sum {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}
func TestObjectUpdate(t *testing.T) {
skipIfNotOk(t)
buf := bytes.NewBufferString(fstest.RandomString(200))
hash := md5.New()
in := io.TeeReader(buf, hash)
file1.Size = int64(buf.Len())
obj := findObject(t, file1.Path)
err := obj.Update(in, file1.ModTime, file1.Size)
if err != nil {
t.Fatal("Update error", err)
}
file1.Md5sum = hex.EncodeToString(hash.Sum(nil))
file1.Check(t, obj, remote.Precision())
// Re-read the object and check again
obj = findObject(t, file1.Path)
file1.Check(t, obj, remote.Precision())
}
func TestObjectStorable(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if !obj.Storable() {
t.Fatalf("Expecting %v to be storable", obj)
}
}
func TestLimitedFs(t *testing.T) {
skipIfNotOk(t)
remoteName := subRemoteName + "/" + file2.Path
file2Copy := file2
file2Copy.Path = "z.txt"
fileRemote, err := fs.NewFs(remoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", remoteName, err)
}
fstest.CheckListing(t, fileRemote, []fstest.Item{file2Copy})
_, ok := fileRemote.(*fs.Limited)
if !ok {
t.Errorf("%v is not a fs.Limited", fileRemote)
}
}
func TestLimitedFsNotFound(t *testing.T) {
skipIfNotOk(t)
remoteName := subRemoteName + "/not found.txt"
fileRemote, err := fs.NewFs(remoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", remoteName, err)
}
fstest.CheckListing(t, fileRemote, []fstest.Item{})
_, ok := fileRemote.(*fs.Limited)
if ok {
t.Errorf("%v is is a fs.Limited", fileRemote)
}
}
func TestObjectRemove(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
err := obj.Remove()
if err != nil {
t.Fatal("Remove error", err)
}
fstest.CheckListing(t, remote, []fstest.Item{file2})
}
func TestObjectPurge(t *testing.T) {
skipIfNotOk(t)
fstest.TestPurge(t, remote)
err := fs.Purge(remote)
if err == nil {
t.Fatal("Expecting error after on second purge")
}
}
func TestFinalise(t *testing.T) {
skipIfNotOk(t)
if strings.HasPrefix(RemoteName, "/") {
// Remove temp directory
err := os.Remove(RemoteName)
if err != nil {
log.Printf("Failed to remove %q: %v\n", RemoteName, err)
}
}
}


@@ -1,143 +0,0 @@
// +build ignore
// Make the test files from fstests.go
package main
import (
"bufio"
"html/template"
"log"
"os"
"os/exec"
"regexp"
"strings"
)
// Search fstests.go and return all the test function names
func findTestFunctions() []string {
fns := []string{}
matcher := regexp.MustCompile(`^func\s+(Test.*?)\(`)
in, err := os.Open("fstests.go")
if err != nil {
log.Fatalf("Couldn't open fstests.go: %v", err)
}
defer in.Close()
scanner := bufio.NewScanner(in)
for scanner.Scan() {
line := scanner.Text()
matches := matcher.FindStringSubmatch(line)
if len(matches) > 0 {
fns = append(fns, matches[1])
}
}
if err := scanner.Err(); err != nil {
log.Fatalf("Error scanning file: %v", err)
}
return fns
}
// Data to substitute
type Data struct {
Regenerate string
FsName string
UpperFsName string
TestName string
ObjectName string
Fns []string
}
var testProgram = `
// Test {{ .UpperFsName }} filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: {{ .Regenerate }}
package {{ .FsName }}_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/{{ .FsName }}"
)
func init() {
fstests.NilObject = fs.Object((*{{ .FsName }}.FsObject{{ .ObjectName }})(nil))
fstests.RemoteName = "{{ .TestName }}"
}
// Generic tests for the Fs
{{ range $fn := .Fns }}func {{ $fn }}(t *testing.T){ fstests.{{ $fn }}(t) }
{{ end }}
`
// Generate test file piping it through gofmt
func generateTestProgram(t *template.Template, fns []string, Fsname string) {
fsname := strings.ToLower(Fsname)
TestName := "Test" + Fsname + ":"
outfile := "../../" + fsname + "/" + fsname + "_test.go"
// Find last capitalised group to be object name
matcher := regexp.MustCompile(`([A-Z][a-z0-9]+)$`)
matches := matcher.FindStringSubmatch(Fsname)
if len(matches) == 0 {
log.Fatalf("Couldn't find object name in %q", Fsname)
}
ObjectName := matches[1]
if fsname == "local" {
TestName = ""
}
data := Data{
Regenerate: "go run gen_tests.go or make gen_tests",
FsName: fsname,
UpperFsName: Fsname,
TestName: TestName,
ObjectName: ObjectName,
Fns: fns,
}
cmd := exec.Command("gofmt")
log.Printf("Writing %q", outfile)
out, err := os.Create(outfile)
if err != nil {
log.Fatal(err)
}
cmd.Stdout = out
gofmt, err := cmd.StdinPipe()
if err != nil {
log.Fatal(err)
}
if err = cmd.Start(); err != nil {
log.Fatal(err)
}
if err = t.Execute(gofmt, data); err != nil {
log.Fatal(err)
}
if err = gofmt.Close(); err != nil {
log.Fatal(err)
}
if err = cmd.Wait(); err != nil {
log.Fatal(err)
}
if err = out.Close(); err != nil {
log.Fatal(err)
}
}
func main() {
fns := findTestFunctions()
t := template.Must(template.New("main").Parse(testProgram))
generateTestProgram(t, fns, "Local")
generateTestProgram(t, fns, "Swift")
generateTestProgram(t, fns, "S3")
generateTestProgram(t, fns, "Drive")
generateTestProgram(t, fns, "GoogleCloudStorage")
generateTestProgram(t, fns, "Dropbox")
log.Printf("Done")
}


@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"log"
"net/http"
"code.google.com/p/goauth2/oauth"
"github.com/ncw/rclone/fs"
@@ -81,7 +82,7 @@ func (auth *Auth) newTransport(name string) (*oauth.Transport, error) {
t := &oauth.Transport{
Config: config,
Transport: fs.Config.Transport(),
Transport: http.DefaultTransport,
}
return t, nil


@@ -17,14 +17,15 @@ import (
"encoding/hex"
"fmt"
"io"
"mime"
"net/http"
"path"
"regexp"
"strings"
"time"
"google.golang.org/api/googleapi"
"google.golang.org/api/storage/v1"
"code.google.com/p/google-api-go-client/googleapi"
"code.google.com/p/google-api-go-client/storage/v1"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/googleauth"
@@ -33,8 +34,8 @@ import (
const (
rcloneClientId = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "X4Z3ca8xfWDb1Voo-F9a7ZxJ"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
RFC3339In = time.RFC3339
RFC3339Out = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 256 // chunk size to read directory listings
)
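// A standalone sketch of why the output layout spells out all nine fractional
// digits: RFC3339Nano drops trailing zeros, while the fixed layout stays
// stable for round-tripping mtimes; the sample time is arbitrary.
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2014, 7, 19, 13, 12, 20, 500000000, time.UTC)
	fmt.Println(t.Format(time.RFC3339Nano))                      // 2014-07-19T13:12:20.5Z
	fmt.Println(t.Format("2006-01-02T15:04:05.000000000Z07:00")) // 2014-07-19T13:12:20.500000000Z
}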
@@ -214,7 +215,7 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsStorage) newFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
func (f *FsStorage) NewFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
o := &FsObjectStorage{
storage: f,
remote: remote,
@@ -235,7 +236,7 @@ func (f *FsStorage) newFsObjectWithInfo(remote string, info *storage.Object) fs.
//
// May return nil if an error occurred
func (f *FsStorage) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
return f.NewFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
@@ -254,23 +255,16 @@ func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
return
}
if !directories {
for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, f.root) {
fs.Log(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
fn(remote, object)
for _, object := range objects.Items {
if directories && !strings.HasSuffix(object.Name, "/") {
continue
}
} else {
var object storage.Object
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
continue
}
fn(prefix[:len(prefix)-1], &object)
if !strings.HasPrefix(object.Name, f.root) {
fs.Log(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
fn(remote, object)
}
if objects.NextPageToken == "" {
break
@@ -292,7 +286,7 @@ func (f *FsStorage) List() fs.ObjectsChan {
go func() {
defer close(out)
f.list(false, func(remote string, object *storage.Object) {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
@@ -358,8 +352,8 @@ func (f *FsStorage) ListDir() fs.DirChan {
// The new object may have been created if an error is returned
func (f *FsStorage) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
o := &FsObjectStorage{storage: f, remote: remote}
return o, o.Update(in, modTime, size)
fs := &FsObjectStorage{storage: f, remote: remote}
return fs, fs.Update(in, modTime, size)
}
// Mkdir creates the bucket if it doesn't exist
@@ -440,7 +434,7 @@ func (o *FsObjectStorage) setMetaData(info *storage.Object) {
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(timeFormatIn, mtimeString)
modTime, err := time.Parse(RFC3339In, mtimeString)
if err == nil {
o.modTime = modTime
return
@@ -450,7 +444,7 @@ func (o *FsObjectStorage) setMetaData(info *storage.Object) {
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormatIn, info.Updated)
modTime, err := time.Parse(RFC3339In, info.Updated)
if err != nil {
fs.Log(o, "Bad time decode: %v", err)
} else {
@@ -490,7 +484,7 @@ func (o *FsObjectStorage) ModTime() time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormatOut)
metadata[metaMtime] = modTime.Format(RFC3339Out)
return metadata
}
@@ -502,12 +496,11 @@ func (o *FsObjectStorage) SetModTime(modTime time.Time) {
Name: o.storage.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do()
_, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
}
o.setMetaData(newObject)
}
// Is this object storable
@@ -537,7 +530,7 @@ func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
return nil, err
}
if res.StatusCode != 200 {
_ = res.Body.Close() // ignore error
res.Body.Close()
return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
@@ -547,21 +540,24 @@ func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
//
// The new object may have been created if an error is returned
func (o *FsObjectStorage) Update(in io.Reader, modTime time.Time, size int64) error {
// Guess the content type
contentType := mime.TypeByExtension(path.Ext(o.remote))
if contentType == "" {
contentType = "application/octet-stream"
}
object := storage.Object{
Bucket: o.storage.bucket,
Name: o.storage.root + o.remote,
ContentType: fs.MimeType(o),
ContentType: contentType,
Size: uint64(size),
Updated: modTime.Format(timeFormatOut), // Doesn't get set
Updated: modTime.Format(RFC3339Out), // Doesn't get set
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.storage.svc.Objects.Insert(o.storage.bucket, &object).Media(in).Name(object.Name).PredefinedAcl(o.storage.objectAcl).Do()
if err != nil {
return err
}
// Set the metadata for the new object while we have it
o.setMetaData(newObject)
return nil
return err
}
// Remove an object


@@ -1,53 +0,0 @@
// Test GoogleCloudStorage filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
package googlecloudstorage_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/googlecloudstorage"
)
func init() {
fstests.NilObject = fs.Object((*googlecloudstorage.FsObjectStorage)(nil))
fstests.RemoteName = "TestGoogleCloudStorage:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }


@@ -1,11 +1,6 @@
// Local filesystem interface
package local
// Note that all rclone paths should be / separated. Anything coming
// from the filepath module will have \ separators on windows so
// should be converted using filepath.ToSlash. Windows is quite happy
// with / separators so there is no need to convert them back.
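// A standalone sketch of the separator rule described above: filepath.Join
// emits the native separator, and ToSlash converts the result to the
// /-separated form rclone uses (a no-op on Unix, where the separator is
// already /).
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Prints "sub dir/hello world" on every platform.
	fmt.Println(filepath.ToSlash(filepath.Join("sub dir", "hello world")))
}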
import (
"crypto/md5"
"encoding/hex"
@@ -13,6 +8,7 @@ import (
"hash"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
@@ -50,7 +46,7 @@ type FsObjectLocal struct {
// NewFs constructs an FsLocal from the path
func NewFs(name, root string) (fs.Fs, error) {
root = filepath.ToSlash(path.Clean(root))
root = path.Clean(root)
f := &FsLocal{root: root}
// Check to see if this points to a file
fi, err := os.Lstat(f.root)
@@ -73,9 +69,8 @@ func (f *FsLocal) String() string {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
remote = filepath.ToSlash(remote)
path := path.Join(f.root, remote)
func (f *FsLocal) NewFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
path := filepath.Join(f.root, remote)
o := &FsObjectLocal{local: f, remote: remote, path: path}
if info != nil {
o.info = info
@@ -93,7 +88,7 @@ func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object
//
// May return nil if an error occurred
func (f *FsLocal) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
return f.NewFsObjectWithInfo(remote, nil)
}
// List the path returning a channel of FsObjects
@@ -105,19 +100,19 @@ func (f *FsLocal) List() fs.ObjectsChan {
err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", path, err)
log.Printf("Failed to open directory: %s: %s", path, err)
} else {
remote, err := filepath.Rel(f.root, path)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to get relative path %s: %s", path, err)
log.Printf("Failed to get relative path %s: %s", path, err)
return nil
}
if remote == "." {
return nil
// remote = ""
}
if fs := f.newFsObjectWithInfo(remote, fi); fs != nil {
if fs := f.NewFsObjectWithInfo(remote, fi); fs != nil {
if fs.Storable() {
out <- fs
}
@@ -127,7 +122,7 @@ func (f *FsLocal) List() fs.ObjectsChan {
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", f.root, err)
log.Printf("Failed to open directory: %s: %s", f.root, err)
}
close(out)
}()
@@ -142,7 +137,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
items, err := ioutil.ReadDir(f.root)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't find read directory: %s", err)
log.Printf("Couldn't find read directory: %s", err)
} else {
for _, item := range items {
if item.IsDir() {
@@ -157,7 +152,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
err := filepath.Walk(dirpath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", path, err)
log.Printf("Failed to open directory: %s: %s", path, err)
} else {
dir.Count += 1
dir.Bytes += fi.Size()
@@ -166,7 +161,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", dirpath, err)
log.Printf("Failed to open directory: %s: %s", dirpath, err)
}
out <- dir
}
@@ -179,7 +174,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
// Puts the FsObject to the local filesystem
func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
dstPath := path.Join(f.root, remote)
dstPath := filepath.Join(f.root, remote)
// Temporary FsObject under construction - info filled in by Update()
o := &FsObjectLocal{local: f, remote: remote, path: dstPath}
err := o.Update(in, modTime, size)
@@ -223,15 +218,12 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
}
path := fd.Name()
// fmt.Println("Created temp file", path)
err = fd.Close()
if err != nil {
return time.Second
}
fd.Close()
// Delete it on return
defer func() {
// fmt.Println("Remove temp file")
_ = os.Remove(path) // ignore error
os.Remove(path)
}()
// Find the minimum duration we can detect
@@ -267,13 +259,6 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsLocal) Purge() error {
fi, err := os.Lstat(f.root)
if err != nil {
return err
}
if !fi.Mode().IsDir() {
return fmt.Errorf("Can't Purge non directory: %q", f.root)
}
return os.RemoveAll(f.root)
}
@@ -340,13 +325,6 @@ func (o *FsObjectLocal) SetModTime(modTime time.Time) {
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
fs.Debug(o, "Failed to set mtime on file: %s", err)
return
}
// Re-read metadata
err = o.lstat()
if err != nil {
fs.Debug(o, "Failed to stat: %s", err)
return
}
}
@@ -357,7 +335,7 @@ func (o *FsObjectLocal) Storable() bool {
fs.Debug(o, "Can't transfer non file/directory")
return false
} else if mode&os.ModeDir != 0 {
// fs.Debug(o, "Skipping directory")
fs.Debug(o, "FIXME Skipping directory")
return false
}
return true


@@ -1,53 +0,0 @@
// Test Local filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
package local_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/local"
)
func init() {
fstests.NilObject = fs.Object((*local.FsObjectLocal)(nil))
fstests.RemoteName = ""
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

View File

@@ -1,17 +1,3 @@
Change lsd command so it doesn't show -1
* Make sure all Fses show -1 for objects, zero for dates etc
* Make test?
Put the TestRemote names into the Fs description
Make test_all.sh use the TestRemote name automatically
Run errcheck and go vet in the make file
.. Also race detector?
Get rid of Storable?
Write developer manual
Todo
* FIXME: More -dry-run checks for object transfer
* Might be quicker to check md5sums first? for swift <-> swift certainly, and maybe for small files
@@ -19,23 +5,24 @@ Todo
* if object.PseudoDirectory {
* fmt.Printf("%9s %19s %s\n", "Directory", "-", fs.Remote())
* Make Account wrapper
* limit bandwidth for a pool of all individual connections (see the token-bucket sketch after this list)
* do timeouts by setting a limit, seeing whether io has happened
and resetting it if it has
* make Account do progress meter
* Make logging controllable with flags (mostly done)
* -timeout: Make all timeouts be settable with command line parameters
* Windows paths? Do we need to translate / and \?
* Make a fs.Errorf and count errors and log them at a different level
* Add max object size to fs metadata - 5GB for swift, infinite for local, ? for s3
* tie into -max-size flag
* FIXME Make NewFs return err.IsAnObject so we can put the LimitedFs
creation in common code? Or try for as much as possible?
* FIXME Account all the transactions (ls etc) using a different
Roundtripper wrapper which wraps the transactions?
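
A minimal sketch of the pooled bandwidth-limit idea above, assuming a hypothetical shared token bucket that every connection's reader charges against (names and structure are illustrative, not rclone's actual Account wrapper):

```
// Hypothetical sketch only - not rclone's Account wrapper. A single
// token bucket shared by all readers caps their combined rate.
package limit

import (
	"io"
	"sync"
	"time"
)

type bucket struct {
	mu     sync.Mutex
	tokens float64   // bytes that may be read right now
	rate   float64   // bytes per second (the limit)
	last   time.Time // time of the last refill
}

func newBucket(bytesPerSecond float64) *bucket {
	return &bucket{tokens: bytesPerSecond, rate: bytesPerSecond, last: time.Now()}
}

// take blocks until n bytes' worth of tokens are available.
func (b *bucket) take(n int) {
	for {
		b.mu.Lock()
		now := time.Now()
		b.tokens += now.Sub(b.last).Seconds() * b.rate
		if b.tokens > b.rate {
			b.tokens = b.rate // cap the burst at one second's worth
		}
		b.last = now
		if b.tokens >= float64(n) {
			b.tokens -= float64(n)
			b.mu.Unlock()
			return
		}
		b.mu.Unlock()
		time.Sleep(10 * time.Millisecond)
	}
}

// reader charges every Read against the shared bucket, so the sum of
// all readers built on one bucket respects the limit.
type reader struct {
	r io.Reader
	b *bucket
}

func (l *reader) Read(p []byte) (int, error) {
	n, err := l.r.Read(p)
	if n > 0 {
		l.b.take(n)
	}
	return n, err
}
```
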
More rsync features
* include
* exclude
* max size
* -c, --checksum skip based on checksum, not mod-time & size
Ideas for flags
* --retries N flag which would make rclone retry a sync until it succeeded or N attempts had been made (see the sketch after this list)
* FIXME write tests for local file system
* FIXME implement tests for single file operations in rclonetest
* Need to make directory objects otherwise can't upload an empty directory
* Or could upload empty directories only?
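
A sketch of the --retries idea from the flags list above (a hypothetical helper, not a flag rclone has yet):

```
// Hypothetical sketch of --retries N: run the sync until it
// succeeds or N attempts have been made, returning the last error.
package retry

import "log"

func withRetries(n int, sync func() error) error {
	var err error
	for attempt := 1; attempt <= n; attempt++ {
		err = sync()
		if err == nil {
			return nil
		}
		log.Printf("attempt %d/%d failed: %v", attempt, n, err)
	}
	return err
}
```
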
Ideas
* could do encryption - put IV into metadata?
@@ -48,6 +35,26 @@ Ideas
* control times sync (which is slow with some remotes) with -a --archive flag?
* Copy a glob pattern - could do with LimitedFs
s3
* Can maybe set last modified?
* https://forums.aws.amazon.com/message.jspa?messageID=214062
* Otherwise can set metadata
* Returns etag and last modified in bucket list
Bugs
* Non-verbose - not sure the number transferred got counted up? CHECK
* When doing copy it recurses the whole of the destination FS which isn't necessary
Making a release
* go install -v ./...
* go test ./...
* rclonetest/test.sh
* make tag
* edit README.md
* git commit fs/version.go README.md docs/content/downloads.md
* make retag
* . ~/bin/go-cross
* make cross
* make upload
* make upload_website
* git push --tags origin master

View File

@@ -28,9 +28,8 @@ import (
var (
// Flags
cpuprofile = pflag.StringP("cpuprofile", "", "", "Write cpu profile to file")
statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats (0 to disable)")
statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats")
version = pflag.BoolP("version", "V", false, "Print the version number")
logFile = pflag.StringP("log-file", "", "", "Log everything to this file")
)
type Command struct {
@@ -59,10 +58,10 @@ func (cmd *Command) checkArgs(args []string) {
var Commands = []Command{
{
Name: "copy",
ArgsHelp: "source:path dest:path",
ArgsHelp: "source://path dest://path",
Help: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Sync(fdst, fsrc, false)
@@ -75,13 +74,13 @@ var Commands = []Command{
},
{
Name: "sync",
ArgsHelp: "source:path dest:path",
ArgsHelp: "source://path dest://path",
Help: `
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the --dry-run flag.`,
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the --dry-run flag.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Sync(fdst, fsrc, true)
if err != nil {
@@ -93,11 +92,11 @@ var Commands = []Command{
},
{
Name: "ls",
ArgsHelp: "[remote:path]",
ArgsHelp: "[remote://path]",
Help: `
List all the objects in the path with size and path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.List(fdst, os.Stdout)
err := fs.List(fdst)
if err != nil {
log.Fatalf("Failed to list: %v", err)
}
@@ -107,11 +106,11 @@ var Commands = []Command{
},
{
Name: "lsd",
ArgsHelp: "[remote:path]",
ArgsHelp: "[remote://path]",
Help: `
List all directories/containers/buckets in the path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.ListDir(fdst, os.Stdout)
err := fs.ListDir(fdst)
if err != nil {
log.Fatalf("Failed to listdir: %v", err)
}
@@ -121,12 +120,11 @@ var Commands = []Command{
},
{
Name: "lsl",
ArgsHelp: "[remote:path]",
ArgsHelp: "[remote://path]",
Help: `
List all the objects in the path with modification time,
size and path.`,
List all the objects in the path with modification time, size and path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.ListLong(fdst, os.Stdout)
err := fs.ListLong(fdst)
if err != nil {
log.Fatalf("Failed to list long: %v", err)
}
@@ -136,12 +134,11 @@ var Commands = []Command{
},
{
Name: "md5sum",
ArgsHelp: "[remote:path]",
ArgsHelp: "[remote://path]",
Help: `
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.`,
Produces an md5sum file for all the objects in the path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Md5sum(fdst, os.Stdout)
err := fs.Md5sum(fdst)
if err != nil {
log.Fatalf("Failed to list: %v", err)
}
@@ -151,7 +148,7 @@ var Commands = []Command{
},
{
Name: "mkdir",
ArgsHelp: "remote:path",
ArgsHelp: "remote://path",
Help: `
Make the path if it doesn't already exist`,
Run: func(fdst, fsrc fs.Fs) {
@@ -165,7 +162,7 @@ var Commands = []Command{
},
{
Name: "rmdir",
ArgsHelp: "remote:path",
ArgsHelp: "remote://path",
Help: `
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.`,
@@ -180,7 +177,7 @@ var Commands = []Command{
},
{
Name: "purge",
ArgsHelp: "remote:path",
ArgsHelp: "remote://path",
Help: `
Remove the path and all of its contents.`,
Run: func(fdst, fsrc fs.Fs) {
@@ -194,7 +191,7 @@ var Commands = []Command{
},
{
Name: "check",
ArgsHelp: "source:path dest:path",
ArgsHelp: "source://path dest://path",
Help: `
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
@@ -243,8 +240,7 @@ Subcommands:
fmt.Fprintf(os.Stderr, "Options:\n")
pflag.PrintDefaults()
fmt.Fprintf(os.Stderr, `
It is only necessary to use a unique prefix of the subcommand, eg 'up'
for 'upload'.
It is only necessary to use a unique prefix of the subcommand, eg 'up' for 'upload'.
`)
}
@@ -269,11 +265,7 @@ func ParseFlags() {
fs.Stats.Error()
log.Fatal(err)
}
err = pprof.StartCPUProfile(f)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
}
@@ -327,9 +319,6 @@ func NewFs(remote string) fs.Fs {
// Print the stats every statsInterval
func StartStats() {
if *statsInterval <= 0 {
return
}
go func() {
ch := time.Tick(*statsInterval)
for {
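
The guard removed above is more than cosmetic: time.Tick is documented to return a nil channel when the duration is not positive, and a receive from a nil channel blocks forever, so without the early return `--stats 0` would leave a goroutine stuck instead of disabling stats. A quick demonstration:

```
// time.Tick(0) yields a nil channel; receiving from it would block
// forever, which is what the removed <= 0 guard protected against.
package main

import (
	"fmt"
	"time"
)

func main() {
	ch := time.Tick(0)
	fmt.Println(ch == nil) // true
}
```
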
@@ -347,17 +336,6 @@ func main() {
}
command, args := ParseCommand()
// Log file output
if *logFile != "" {
f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
if err != nil {
log.Fatalf("Failed to open log file: %v", err)
}
f.Seek(0, os.SEEK_END)
log.SetOutput(f)
redirectStderr(f)
}
// Make source and destination fs
var fdst, fsrc fs.Fs
if len(args) >= 1 {
@@ -378,10 +356,10 @@ func main() {
if command.Run != nil {
command.Run(fdst, fsrc)
if !command.NoStats {
fmt.Fprintln(os.Stderr, fs.Stats)
fmt.Println(fs.Stats)
}
if fs.Config.Verbose {
fs.Debug(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
log.Printf("*** Go routines at exit %d\n", runtime.NumGoroutine())
}
if fs.Stats.Errored() {
os.Exit(1)

419
rclonetest/rclonetest.go Normal file
View File

@@ -0,0 +1,419 @@
// Test rclone by doing real transactions to a storage provider to and
// from the local disk
package main
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ogier/pflag"
// Active file systems
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
)
// Globals
var (
localName, remoteName string
version = pflag.BoolP("version", "V", false, "Print the version number")
subDir = pflag.BoolP("subdir", "S", false, "Test with a sub directory")
)
// Represents an item for checking
type Item struct {
Path string
Md5sum string
ModTime time.Time
Size int64
}
// Represents all items for checking
type Items struct {
byName map[string]*Item
items []Item
}
// Make an Items
func NewItems(items []Item) *Items {
is := &Items{
byName: make(map[string]*Item),
items: items,
}
// Fill up byName
for i := range items {
is.byName[items[i].Path] = &items[i]
}
return is
}
// Check off an item
func (is *Items) Find(obj fs.Object) {
i, ok := is.byName[obj.Remote()]
if !ok {
log.Fatalf("Unexpected file %q", obj.Remote())
}
delete(is.byName, obj.Remote())
// Check attributes
Md5sum, err := obj.Md5sum()
if err != nil {
log.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
}
if i.Md5sum != Md5sum {
log.Fatalf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
}
if i.Size != obj.Size() {
log.Fatalf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
}
// check the mod time to the given precision
modTime := obj.ModTime()
dt := modTime.Sub(i.ModTime)
if dt >= fs.Config.ModifyWindow || dt <= -fs.Config.ModifyWindow {
log.Fatalf("%s: Modification time difference too big |%s| > %s (%s vs %s)", obj.Remote(), dt, fs.Config.ModifyWindow, modTime, i.ModTime)
}
}
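
The precision test above in isolation: two timestamps compare equal only when their difference lies strictly inside ±ModifyWindow, which is why CalculateModifyWindow is run before any comparisons. A self-contained sketch:

```
// Sketch of the ModifyWindow comparison used by Find above: times
// are treated as equal when |dt| is strictly less than the window.
package main

import (
	"fmt"
	"time"
)

func within(a, b time.Time, window time.Duration) bool {
	dt := a.Sub(b)
	return dt < window && dt > -window
}

func main() {
	t := time.Date(2014, 7, 19, 13, 12, 20, 0, time.UTC)
	fmt.Println(within(t, t.Add(500*time.Millisecond), time.Second)) // true
	fmt.Println(within(t, t.Add(2*time.Second), time.Second))        // false
}
```
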
// Check all done
func (is *Items) Done() {
if len(is.byName) != 0 {
for name := range is.byName {
log.Printf("Not found %q", name)
}
log.Fatalf("%d objects not found", len(is.byName))
}
}
// Checks the fs to see if it has the expected contents
func CheckListing(f fs.Fs, items []Item) {
is := NewItems(items)
for obj := range f.List() {
is.Find(obj)
}
is.Done()
}
// Parse a time string or explode
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// Write a file
func WriteFile(filePath, content string, t time.Time) {
// FIXME make directories?
filePath = path.Join(localName, filePath)
dirPath := path.Dir(filePath)
err := os.MkdirAll(dirPath, 0770)
if err != nil {
log.Fatalf("Failed to make directories %q: %v", dirPath, err)
}
err = ioutil.WriteFile(filePath, []byte(content), 0600)
if err != nil {
log.Fatalf("Failed to write file %q: %v", filePath, err)
}
err = os.Chtimes(filePath, t, t)
if err != nil {
log.Fatalf("Failed to chtimes file %q: %v", filePath, err)
}
}
// Create a random string
func RandomString(n int) string {
source := "abcdefghijklmnopqrstuvwxyz0123456789"
out := make([]byte, n)
for i := range out {
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
func TestMkdir(flocal, fremote fs.Fs) {
err := fs.Mkdir(fremote)
if err != nil {
log.Fatalf("Mkdir failed: %v", err)
}
items := []Item{}
CheckListing(flocal, items)
CheckListing(fremote, items)
}
var t1 = Time("2001-02-03T04:05:06.499999999Z")
var t2 = Time("2011-12-25T12:59:59.123456789Z")
var t3 = Time("2011-12-30T12:59:59.000000000Z")
func TestCopy(flocal, fremote fs.Fs) {
WriteFile("sub dir/hello world", "hello world", t1)
// Check dry run is working
log.Printf("Copy with --dry-run")
fs.Config.DryRun = true
err := fs.Sync(fremote, flocal, false)
fs.Config.DryRun = false
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
items := []Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
CheckListing(flocal, items)
CheckListing(fremote, []Item{})
// Now without dry run
log.Printf("Copy")
err = fs.Sync(fremote, flocal, false)
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// Now delete the local file and download it
err = os.Remove(localName + "/sub dir/hello world")
if err != nil {
log.Fatalf("Remove failed: %v", err)
}
CheckListing(flocal, []Item{})
CheckListing(fremote, items)
log.Printf("Copy - redownload")
err = fs.Sync(flocal, fremote, false)
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// Clean the directory
cleanTempDir()
}
func TestSync(flocal, fremote fs.Fs) {
WriteFile("empty space", "", t1)
log.Printf("Sync after changing file modtime only")
err := os.Chtimes(localName+"/empty space", t2, t2)
if err != nil {
log.Fatalf("Chtimes failed: %v", err)
}
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items := []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after adding a file")
WriteFile("potato", "------------------------------------------------------------", t3)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after changing a file's size only")
WriteFile("potato", "smaller but same date", t3)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after removing a file and adding a file --dry-run")
WriteFile("potato2", "------------------------------------------------------------", t1)
err = os.Remove(localName + "/potato")
if err != nil {
log.Fatalf("Remove failed: %v", err)
}
fs.Config.DryRun = true
err = fs.Sync(fremote, flocal, true)
fs.Config.DryRun = false
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
before := []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
CheckListing(flocal, items)
CheckListing(fremote, before)
log.Printf("Sync after removing a file and adding a file")
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
CheckListing(flocal, items)
CheckListing(fremote, items)
}
func TestLs(flocal, fremote fs.Fs) {
// Underlying List has been tested above, so we just make sure it runs
err := fs.List(fremote)
if err != nil {
log.Fatalf("List failed: %v", err)
}
}
func TestLsd(flocal, fremote fs.Fs) {
}
func TestCheck(flocal, fremote fs.Fs) {
}
func TestPurge(fremote fs.Fs) {
err := fs.Purge(fremote)
if err != nil {
log.Fatalf("Purge failed: %v", err)
}
unexpected := 0
for obj := range fremote.List() {
unexpected++
log.Printf("Found unexpected item %s", obj.Remote())
}
if unexpected != 0 {
log.Fatalf("exiting as found %d unexpected items", unexpected)
}
}
func TestRmdir(flocal, fremote fs.Fs) {
err := fs.Rmdir(fremote)
if err != nil {
log.Fatalf("Rmdir failed: %v", err)
}
}
func syntaxError() {
fmt.Fprintf(os.Stderr, `Test rclone with a remote to find bugs in either - %s.
Syntax: [options] remote:
Need a remote: as argument. This will create a random container or
directory under it and perform tests on it, deleting it at the end.
Options:
`, fs.Version)
pflag.PrintDefaults()
}
// Clean the temporary directory
func cleanTempDir() {
log.Printf("Cleaning temporary directory: %q", localName)
err := os.RemoveAll(localName)
if err != nil {
log.Printf("Failed to remove %q: %v", localName, err)
}
}
func main() {
pflag.Usage = syntaxError
pflag.Parse()
if *version {
fmt.Printf("rclonetest %s\n", fs.Version)
os.Exit(0)
}
fs.LoadConfig()
rand.Seed(time.Now().UnixNano())
args := pflag.Args()
if len(args) != 1 {
syntaxError()
os.Exit(1)
}
remoteName = args[0]
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
remoteName += RandomString(32)
var parentRemote fs.Fs
if *subDir {
var err error
parentRemote, err = fs.NewFs(remoteName)
if err != nil {
log.Fatalf("Failed to make parent %q: %v", remoteName, err)
}
remoteName += "/" + RandomString(8)
}
log.Printf("Testing with remote %q", remoteName)
var err error
localName, err = ioutil.TempDir("", "rclone")
if err != nil {
log.Fatalf("Failed to create temp dir: %v", err)
}
log.Printf("Testing with local %q", localName)
fremote, err := fs.NewFs(remoteName)
if err != nil {
log.Fatalf("Failed to make %q: %v", remoteName, err)
}
flocal, err := fs.NewFs(localName)
if err != nil {
log.Fatalf("Failed to make %q: %v", remoteName, err)
}
fs.CalculateModifyWindow(fremote, flocal)
TestMkdir(flocal, fremote)
TestCopy(flocal, fremote)
TestSync(flocal, fremote)
TestLs(flocal, fremote)
TestLsd(flocal, fremote)
TestCheck(flocal, fremote)
TestPurge(fremote)
//TestRmdir(flocal, fremote)
if parentRemote != nil {
TestPurge(parentRemote)
}
cleanTempDir()
log.Printf("Tests OK")
}

25
rclonetest/test.sh Executable file
View File

@@ -0,0 +1,25 @@
#!/bin/bash
go install
REMOTES="
memstore:
s3:
drive2:
gcs:
dropbox:
/tmp/z
"
function test_remote {
args=$@
rclonetest $args || {
echo "*** rclonetest $args FAILED ***"
exit 1
}
}
for remote in $REMOTES; do
test_remote $remote
test_remote --subdir $remote
done

View File

@@ -1,15 +0,0 @@
// Log the panic to the log file - for OSes which can't do this
//+build !windows,!unix
package main
import (
"log"
"os"
)
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
log.Printf("Can't redirect stderr to file")
}

View File

@@ -1,19 +0,0 @@
// Log the panic under unix to the log file
//+build unix
package main
import (
"log"
"os"
"syscall"
)
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
err := syscall.Dup2(int(f.Fd()), int(os.Stderr.Fd()))
if err != nil {
log.Fatalf("Failed to redirect stderr to file: %v", err)
}
}

View File

@@ -1,39 +0,0 @@
// Log the panic under windows to the log file
//
// Code from minix, via
//
// http://play.golang.org/p/kLtct7lSUg
//+build windows
package main
import (
"log"
"os"
"syscall"
)
var (
kernel32 = syscall.MustLoadDLL("kernel32.dll")
procSetStdHandle = kernel32.MustFindProc("SetStdHandle")
)
func setStdHandle(stdhandle int32, handle syscall.Handle) error {
r0, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
if r0 == 0 {
if e1 != 0 {
return error(e1)
}
return syscall.EINVAL
}
return nil
}
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
err := setStdHandle(syscall.STD_ERROR_HANDLE, syscall.Handle(f.Fd()))
if err != nil {
log.Fatalf("Failed to redirect stderr to file: %v", err)
}
}

108
s3/s3.go
View File

@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
"mime"
"net/http"
"path"
"regexp"
@@ -100,8 +101,7 @@ func init() {
// Constants
const (
metaMtime = "X-Amz-Meta-Mtime" // the meta key to store mtime in
listChunkSize = 1024 // number of items to read at once
metaMtime = "X-Amz-Meta-Mtime" // the meta key to store mtime in
)
// FsS3 represents a remote s3 server
@@ -184,7 +184,6 @@ func s3Connection(name string) (*s3.S3, error) {
}
c := s3.New(auth, region)
c.Client = fs.Config.Client()
return c, nil
}
@@ -228,7 +227,7 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
func (f *FsS3) NewFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
o := &FsObjectS3{
s3: f,
remote: remote,
@@ -257,7 +256,7 @@ func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
//
// May return nil if an error occurred
func (f *FsS3) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
return f.NewFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
@@ -268,45 +267,32 @@ func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
if directories {
delimiter = "/"
}
marker := ""
for {
objects, err := f.b.List(f.root, delimiter, marker, listChunkSize)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
} else {
rootLength := len(f.root)
if directories {
for _, remote := range objects.CommonPrefixes {
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote := remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
fn(remote, &s3.Key{Key: remote})
}
} else {
for i := range objects.Contents {
object := &objects.Contents[i]
if !strings.HasPrefix(object.Key, f.root) {
fs.Log(f, "Odd name received %q", object.Key)
continue
}
remote := object.Key[rootLength:]
fn(remote, object)
// FIXME need to implement ALL loop
objects, err := f.b.List(f.root, delimiter, "", 10000)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
} else {
rootLength := len(f.root)
if directories {
for _, remote := range objects.CommonPrefixes {
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote := remote[rootLength:]
fn(remote, &s3.Key{Key: remote})
}
} else {
for i := range objects.Contents {
object := &objects.Contents[i]
if !strings.HasPrefix(object.Key, f.root) {
fs.Log(f, "Odd name received %q", object.Key)
continue
}
remote := object.Key[rootLength:]
fn(remote, object)
}
}
if !objects.IsTruncated {
break
}
// Use NextMarker if set, otherwise use last Key
marker = objects.NextMarker
if marker == "" {
marker = objects.Contents[len(objects.Contents)-1].Key
}
}
}
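
The loop deleted above is the standard marker-based pagination for S3 listings. A standalone sketch of the same pattern, assuming the goamz-style Bucket.List(prefix, delimiter, marker, max) call and the ListResp fields visible in this diff (the import path of the goamz fork is an assumption):

```
// Marker pagination sketch matching the loop above. Bucket.List and
// the ListResp fields are as used in this diff; the import path is
// assumed, not confirmed.
package s3list

import "github.com/ncw/goamz/s3"

func listAll(b *s3.Bucket, prefix string, fn func(*s3.Key)) error {
	marker := ""
	for {
		resp, err := b.List(prefix, "", marker, 1024)
		if err != nil {
			return err
		}
		for i := range resp.Contents {
			fn(&resp.Contents[i])
		}
		if !resp.IsTruncated {
			return nil
		}
		// Use NextMarker if set, otherwise the last key returned
		marker = resp.NextMarker
		if marker == "" && len(resp.Contents) > 0 {
			marker = resp.Contents[len(resp.Contents)-1].Key
		}
	}
}
```
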
@@ -323,7 +309,7 @@ func (f *FsS3) List() fs.ObjectsChan {
go func() {
defer close(out)
f.list(false, func(remote string, object *s3.Key) {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
@@ -420,17 +406,9 @@ func (o *FsObjectS3) Remote() string {
return o.remote
}
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectS3) Md5sum() (string, error) {
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
return "", nil
}
return etag, nil
return strings.Trim(strings.ToLower(o.etag), `"`), nil
}
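
For context on the check removed above: S3 returns an ETag that equals the object's MD5 only for single-part uploads; multipart ETags typically carry a "-<parts>" suffix and so fail the 32-hex-digit test, making the guard worthwhile. A small illustration:

```
// Illustration of the matchMd5 check above. A multipart ETag such as
// "9bb58f26192e4ba00f01e2e7b136bbd8-2" fails the 32-hex-digit test,
// so Md5sum returns "" rather than a bogus checksum.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

func main() {
	for _, etag := range []string{
		`"9bb58f26192e4ba00f01e2e7b136bbd8"`,   // single-part upload
		`"9bb58f26192e4ba00f01e2e7b136bbd8-2"`, // multipart upload
	} {
		cleaned := strings.Trim(strings.ToLower(etag), `"`)
		fmt.Println(cleaned, matchMd5.MatchString(cleaned))
	}
}
```
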
// Size returns the size of an object in bytes
@@ -440,28 +418,13 @@ func (o *FsObjectS3) Size() int64 {
// readMetaData gets the metadata if it hasn't already been fetched
//
// if we get a 404 error then we retry a few times for eventual
// consistency reasons
//
// it also sets the info
func (o *FsObjectS3) readMetaData() (err error) {
if o.meta != nil {
return nil
}
var headers s3.Headers
// Try reading the metadata a few times (with exponential
// backoff) to get around eventual consistency on 404 error
for tries := uint(0); tries < 10; tries++ {
headers, err = o.s3.b.Head(o.s3.root+o.remote, nil)
if s3Err, ok := err.(*s3.Error); ok {
if s3Err.StatusCode == http.StatusNotFound {
time.Sleep(5 * time.Millisecond << tries)
continue
}
}
break
}
headers, err := o.s3.b.Head(o.s3.root+o.remote, nil)
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
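
The deleted lines above retried Head with exponential backoff (5 ms, doubled per attempt) to paper over S3's eventual consistency on 404s. The same pattern as a generic, hypothetical helper:

```
// Generic exponential backoff in the style of the loop above:
// the delay starts at 5ms and doubles on each failed attempt.
package backoff

import "time"

func retry(tries uint, op func() error) error {
	var err error
	for try := uint(0); try < tries; try++ {
		err = op()
		if err == nil {
			return nil
		}
		time.Sleep(5 * time.Millisecond << try)
	}
	return err
}
```
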
@@ -544,12 +507,17 @@ func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
metaMtime: swift.TimeToFloatString(modTime),
}
_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, fs.MimeType(o), o.s3.perm, headers)
// Guess the content type
contentType := mime.TypeByExtension(path.Ext(o.remote))
if contentType == "" {
contentType = "application/octet-stream"
}
_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, contentType, o.s3.perm, headers)
if err != nil {
return err
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData()
return err
}
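
A quick illustration of the content-type guess added above: mime.TypeByExtension knows common extensions and returns the empty string otherwise, hence the application/octet-stream fallback:

```
// Demonstration of the content-type guess used by Update above.
package main

import (
	"fmt"
	"mime"
	"path"
)

func guess(remote string) string {
	contentType := mime.TypeByExtension(path.Ext(remote))
	if contentType == "" {
		contentType = "application/octet-stream"
	}
	return contentType
}

func main() {
	fmt.Println(guess("photos/cat.png")) // image/png
	fmt.Println(guess("backup.dat"))     // application/octet-stream (unknown extension)
}
```
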

View File

@@ -1,53 +0,0 @@
// Test S3 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
package s3_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/s3"
)
func init() {
fstests.NilObject = fs.Object((*s3.FsObjectS3)(nil))
fstests.RemoteName = "TestS3:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

View File

@@ -44,12 +44,6 @@ func init() {
Help: "Memset Memstore UK v2",
Value: "https://auth.storage.memset.com/v2.0",
}},
}, {
Name: "tenant",
Help: "Tenant name - optional",
}, {
Name: "region",
Help: "Region name - optional",
},
// snet = flag.Bool("swift-snet", false, "Use internal service network") // FIXME not implemented
},
@@ -113,15 +107,10 @@ func swiftConnection(name string) (*swift.Connection, error) {
return nil, errors.New("auth not found")
}
c := &swift.Connection{
UserName: userName,
ApiKey: apiKey,
AuthUrl: authUrl,
UserAgent: fs.UserAgent,
Tenant: fs.ConfigFile.MustValue(name, "tenant"),
Region: fs.ConfigFile.MustValue(name, "region"),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fs.Config.Transport(),
UserName: userName,
ApiKey: apiKey,
AuthUrl: authUrl,
UserAgent: fs.UserAgent,
}
err := c.Authenticate()
if err != nil {
@@ -168,7 +157,7 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsSwift) newFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
func (f *FsSwift) NewFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
fs := &FsObjectSwift{
swift: f,
remote: remote,
@@ -190,7 +179,7 @@ func (f *FsSwift) newFsObjectWithInfo(remote string, info *swift.Object) fs.Obje
//
// May return nil if an error occurred
func (f *FsSwift) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
return f.NewFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
@@ -212,11 +201,8 @@ func (f *FsSwift) list(directories bool, fn func(string, *swift.Object)) {
for i := range objects {
object := &objects[i]
// FIXME if there are no directories, swift gives back the files for some reason!
if directories {
if !strings.HasSuffix(object.Name, "/") {
continue
}
object.Name = object.Name[:len(object.Name)-1]
if directories && !strings.HasSuffix(object.Name, "/") {
continue
}
if !strings.HasPrefix(object.Name, f.root) {
fs.Log(f, "Odd name received %q", object.Name)
@@ -247,7 +233,7 @@ func (f *FsSwift) List() fs.ObjectsChan {
go func() {
defer close(out)
f.list(false, func(remote string, object *swift.Object) {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
@@ -427,7 +413,6 @@ func (o *FsObjectSwift) Update(in io.Reader, modTime time.Time, size int64) erro
return err
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData()
return err
}

View File

@@ -1,53 +0,0 @@
// Test Swift filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
package swift_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/swift"
)
func init() {
fstests.NilObject = fs.Object((*swift.FsObjectSwift)(nil))
fstests.RemoteName = "TestSwift:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }