mirror of https://github.com/rclone/rclone.git synced 2025-12-15 15:53:41 +00:00

Compare commits


1 Commit
v1.23 ... v1.02

Author           SHA1        Message       Date
Nick Craig-Wood  094cc27621  Version 1.02  2014-07-19 13:12:20 +01:00
83 changed files with 2464 additions and 10381 deletions

.gitignore (vendored), 5 lines changed

@@ -4,7 +4,6 @@ rclone
rclonetest/rclonetest
build
docs/public
MANUAL.md
MANUAL.html
MANUAL.txt
README.html
README.txt
rclone.1

.travis.yml

@@ -1,12 +1,11 @@
language: go
sudo: false
go:
- 1.3.3
- 1.4.2
- 1.5
- 1.1.2
- 1.2.2
- 1.3
- tip
script:
- go get ./...
- go test -v ./...
- go test -cpu=2 -race -v ./...

Makefile

@@ -2,51 +2,39 @@ TAG := $(shell git describe --tags)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
rclone:
rclone: *.go */*.go
@go version
go install -v ./...
go build
test: rclone
go test ./...
cd fs && ./test_all.sh
doc: rclone.1 README.html README.txt
check: rclone
go vet ./...
errcheck ./...
golint ./...
rclone.1: README.md
pandoc -s --from markdown --to man README.md -o rclone.1
doc: rclone.1 MANUAL.html MANUAL.txt
README.html: README.md
pandoc -s --from markdown_github --to html README.md -o README.html
rclone.1: MANUAL.md
pandoc -s --from markdown --to man MANUAL.md -o rclone.1
MANUAL.md: make_manual.py docs/content/*.md
./make_manual.py
MANUAL.html: MANUAL.md
pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
README.txt: README.md
pandoc -s --from markdown_github --to plain README.md -o README.txt
install: rclone
install -d ${DESTDIR}/usr/bin
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
install -t ${DESTDIR}/usr/bin rclone
clean:
go clean ./...
find . -name \*~ | xargs -r rm -f
rm -rf build docs/public
rm -f rclone rclonetest/rclonetest rclone.1 MANUAL.md MANUAL.html MANUAL.txt
rm -f rclone rclonetest/rclonetest rclone.1 README.html README.txt
website:
cd docs && hugo
upload_website: website
rclone -v sync docs/public memstore:www-rclone-org
./rclone -v sync docs/public memstore:www-rclone-org
upload:
rclone -v copy build/ memstore:downloads-rclone-org
./rclone -v copy build/ memstore:downloads-rclone-org
cross: doc
./cross-compile $(TAG)
@@ -57,18 +45,15 @@ serve:
tag:
@echo "Old tag is $(LAST_TAG)"
@echo "New tag is $(NEW_TAG)"
echo -e "package fs\n\n// Version of rclone\nconst Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
echo -e "package fs\n const Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
git tag $(NEW_TAG)
@echo "Add this to changelog in docs/content/changelog.md"
@echo "Add this to changelog in README.md"
@echo " * $(NEW_TAG) -" `date -I`
@git log $(LAST_TAG)..$(NEW_TAG) --oneline
@echo "Then commit the changes"
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo git commit -m "Version $(NEW_TAG)" -a -v
@echo "And finally run make retag before make cross etc"
retag:
git tag -f $(LAST_TAG)
gen_tests:
cd fstest/fstests && go generate

README.md, 307 lines changed

@@ -1,13 +1,12 @@
% rclone(1) User Manual
% Nick Craig-Wood
% Jul 7, 2014
Rclone
======
[![Logo](http://rclone.org/img/rclone-120x120.png)](http://rclone.org/)
[Website](http://rclone.org) |
[Documentation](http://rclone.org/docs/) |
[Changelog](http://rclone.org/changelog/) |
[Installation](http://rclone.org/install/) |
[G+](https://google.com/+RcloneOrg)
[![Build Status](https://travis-ci.org/ncw/rclone.png)](https://travis-ci.org/ncw/rclone) [![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
Rclone is a command line program to sync files and directories to and from
* Google Drive
@@ -15,7 +14,6 @@ Rclone is a command line program to sync files and directories to and from
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* Amazon Cloud Drive
* The local filesystem
Features
@@ -28,13 +26,300 @@ Features
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
See the Home page for more documentation and configuration walkthroughs.
* http://rclone.org/
Install
-------
Rclone is a Go program and comes as a single binary file.
Download the binary for your OS from
* http://rclone.org/downloads/
Or alternatively if you have Go installed use
go install github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`.
Configure
---------
First you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file
`.rclone.conf` in your home directory by default. (You can use the
`--config` option to choose a different config file.)
The easiest way to make the config is to run rclone with the config
option, eg
rclone config
Usage
-----
Rclone syncs a directory tree from local to remote.
Its basic syntax is
Syntax: [options] subcommand <parameters> <parameters...>
See below for how to specify the source and destination paths.
Subcommands
-----------
rclone copy source:path dest:path
Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.
rclone sync source:path dest:path
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the `--dry-run` flag.
rclone ls [remote:path]
List all the objects in the path with sizes.
rclone lsl [remote:path]
List all the objects in the path with sizes and timestamps.
rclone lsd [remote:path]
List all directories/objects/buckets in the path.
rclone mkdir remote:path
Make the path if it doesn't already exist
rclone rmdir remote:path
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.
rclone purge remote:path
Remove the path and all of its contents.
rclone check source:path dest:path
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
rclone md5sum remote:path
Produces an md5sum file for all the objects in the path. This is in
the same format as the standard md5sum tool produces.
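For reference, each line of that output pairs the lowercase hex
digest with the object name, in the layout the standard md5sum tool
uses (the digest and file name below are illustrative only):

    d41d8cd98f00b204e9800998ecf8427e  backup/file.txt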
General options:
```
--checkers=8: Number of checkers to run in parallel.
--config="~/.rclone.conf": Config file.
-n, --dry-run=false: Do a trial run with no permanent changes
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats
--transfers=4: Number of file transfers to run in parallel.
-v, --verbose=false: Print lots more stuff
```
Developer options:
```
--cpuprofile="": Write cpu profile to file
```
Local Filesystem
----------------
Paths are specified as normal filesystem paths, so
rclone sync /home/source /tmp/destination
Will sync `/home/source` to `/tmp/destination`
Swift / Rackspace cloudfiles / Memset Memstore
----------------------------------------------
Paths are specified as remote:container (or remote: for the `lsd`
command). You may put subdirectories in too, eg
`remote:container/path/to/dir`.
So to copy a local directory to a swift container called backup:
rclone sync /home/source swift:backup
The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point seconds since the epoch.
This is a de facto standard (used in the official python-swiftclient
amongst others) for storing the modification time (as read using
os.Stat) for an object.
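As an illustration only, a value of that form could be built in Go
along these lines (a minimal sketch, not rclone's code; the helper
and file name are hypothetical, only the header key and the
float-seconds encoding come from the text above):

```
package main

import (
	"fmt"
	"log"
	"os"
)

// swiftMtime builds the X-Object-Meta-Mtime value described above:
// the file's modification time (as read using os.Stat) encoded as
// floating point seconds since the epoch.
func swiftMtime(path string) (string, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return "", err
	}
	t := fi.ModTime()
	return fmt.Sprintf("%d.%09d", t.Unix(), t.Nanosecond()), nil
}

func main() {
	v, err := swiftMtime("example.txt") // hypothetical local file
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("X-Object-Meta-Mtime:", v)
}
```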
Amazon S3
---------
Paths are specified as remote:bucket. You may put subdirectories in
too, eg `remote:bucket/path/to/dir`.
So to copy a local directory to an s3 container called backup
rclone sync /home/source s3:backup
The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point seconds since the epoch.
Google drive
------------
Paths are specified as remote:path. Drive paths may be as deep as required.
The initial setup for drive involves getting a token from Google drive
which you need to do in your browser. `rclone config` walks you
through it.
To copy a local directory to a drive directory called backup
rclone copy /home/source remote:backup
Google drive stores modification times accurate to 1 ms natively.
Dropbox
-------
Paths are specified as remote:path. Dropbox paths may be as deep as required.
The initial setup for dropbox involves getting a token from Dropbox
which you need to do in your browser. `rclone config` walks you
through it.
To copy a local directory to a dropbox directory called backup
rclone copy /home/source dropbox:backup
Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone". Dropbox datastores are limited
to 100,000 rows so this is the maximum number of files rclone can
manage on Dropbox.
Google Cloud Storage
--------------------
Paths are specified as remote:path. Google Cloud Storage paths may be
as deep as required.
The initial setup for Google Cloud Storage involves getting a token
from Google which you need to do in your browser. `rclone config`
walks you through it.
To copy a local directory to a google cloud storage directory called backup
rclone copy /home/source remote:backup
Google Cloud Storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in
RFC3339 format accurate to 1ns.
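To make the format concrete, here is a small Go sketch of the
RFC3339 round trip that implies (not rclone's code; only the "mtime"
key name and the 1ns precision come from the text above):

```
package main

import (
	"fmt"
	"time"
)

func main() {
	// Format a modification time as RFC3339 accurate to 1ns,
	// as described for the "mtime" metadata key.
	value := time.Now().Format(time.RFC3339Nano)
	fmt.Println("mtime =", value)

	// Reading the metadata back is the reverse operation.
	back, err := time.Parse(time.RFC3339Nano, value)
	if err != nil {
		panic(err)
	}
	fmt.Println("parsed:", back)
}
```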
Single file copies
------------------
Rclone can copy single files
rclone copy src:path/to/file dst:path/dir
Or
rclone copy src:path/to/file dst:path/to/file
Note that you can't rename the file if you are copying from one file to another.
License
-------
This is free software under the terms of the MIT license (check the
COPYING file included in this package).
Bugs
----
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work
Changelog
---------
* v1.02 - 2014-07-19
* Implement Dropbox remote
* Implement Google Cloud Storage remote
* Verify Md5sums and Sizes after copies
* Remove times from "ls" command - lists sizes only
* Add "lsl" - lists times and sizes
* Add "md5sum" command
* v1.01 - 2014-07-04
* drive: fix transfer of big files using up lots of memory
* v1.00 - 2014-07-03
* drive: fix whole second dates
* v0.99 - 2014-06-26
* Fix --dry-run not working
* Make compatible with go 1.1
* v0.98 - 2014-05-30
* s3: Treat missing Content-Length as 0 for some ceph installations
* rclonetest: add file with a space in
* v0.97 - 2014-05-05
* Implement copying of single files
* s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
* drive: Fix multiple files of same name being created
* drive: Use o.Update and fs.Put to optimise transfers
* Add version number, -V and --version
* v0.95 - 2014-03-28
* rclone.org: website, docs and graphics
* drive: fix path parsing
* v0.94 - 2014-03-27
* Change remote format one last time
* GNU style flags
* v0.93 - 2014-03-16
* drive: store token in config file
* cross compile other versions
* set strict permissions on config file
* v0.92 - 2014-03-15
* Config fixes and --config option
* v0.91 - 2014-03-15
* Make config file
* v0.90 - 2013-06-27
* Project named rclone
* v0.00 - 2012-11-18
* Project started
Contact and support
-------------------
The project website is at:
* https://github.com/ncw/rclone
There you can file bug reports, ask for help or send pull requests.
Authors
-------
* Nick Craig-Wood <nick@craig-wood.com>
Contributors
------------
* Your name goes here!

RELEASE.md

@@ -1,22 +0,0 @@
Required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* [gox](https://github.com/mitchellh/gox) for cross compiling
* Run `gox -build-toolchain`
* This assumes you have your own source checkout
* pandoc for making the html and man pages
* errcheck - go get github.com/kisielk/errcheck
* golint - go get github.com/golang/lint
Making a release
* go get -u -f -v ./...
* make check
* make test
* make tag
* edit docs/content/changelog.md
* git commit -a -v
* make retag
* # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross - not required for go >= 1.5
* make cross
* make upload
* make upload_website
* git push --tags origin master

amazonclouddrive/amazonclouddrive.go

@@ -1,729 +0,0 @@
// Package amazonclouddrive provides an interface to the Amazon Cloud
// Drive object storage system.
package amazonclouddrive
/*
FIXME make searching for directory in id and file in id more efficient
- use the name: search parameter - remember the escaping rules
- use Folder GetNode and GetFile
FIXME make the default for no files and no dirs be (FILE & FOLDER) so
we ignore assets completely!
*/
import (
"fmt"
"io"
"log"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"sync"
"time"
"github.com/ncw/go-acd"
"github.com/ncw/rclone/dircache"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/pacer"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "amzn1.application-oa2-client.6bf18d2d1f5b485c94c8988bb03ad0e7"
rcloneClientSecret = "k8/NyszKm5vEkZXAwsbGkd6C3NrbjIqMg4qEhIeF14Szub2wur+/teS3ubXgsLe9//+tr/qoqK+lq6mg8vWkoA=="
folderKind = "FOLDER"
fileKind = "FILE"
assetKind = "ASSET"
statusAvailable = "AVAILABLE"
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
minSleep = 20 * time.Millisecond
)
// Globals
var (
// Description of how to auth for this app
acdConfig = &oauth2.Config{
Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
Endpoint: oauth2.Endpoint{
AuthURL: "https://www.amazon.com/ap/oa",
TokenURL: "https://api.amazon.com/auth/o2/token",
},
ClientID: rcloneClientID,
ClientSecret: fs.Reveal(rcloneClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "amazon cloud drive",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config(name, acdConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: oauthutil.ConfigClientID,
Help: "Amazon Application Client Id - leave blank normally.",
}, {
Name: oauthutil.ConfigClientSecret,
Help: "Amazon Application Client Secret - leave blank normally.",
}},
})
}
// FsAcd represents a remote acd server
type FsAcd struct {
name string // name of this remote
c *acd.Client // the connection to the acd server
root string // the path we are working on
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // pacer for API calls
}
// FsObjectAcd describes an acd object
//
// Will definitely have info but maybe not meta
type FsObjectAcd struct {
acd *FsAcd // what this object is part of
remote string // The remote path
info *acd.Node // Info from the acd object if known
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *FsAcd) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *FsAcd) Root() string {
return f.root
}
// String converts this FsAcd to a string
func (f *FsAcd) String() string {
return fmt.Sprintf("Amazon cloud drive root '%s'", f.root)
}
// Pattern to match an acd path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses an acd 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
409, // Conflict - happens in the unit tests a lot
503, // Service Unavailable
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
// See if HTTP error code is to be retried
if err != nil && resp != nil {
for _, e := range retryErrorCodes {
if resp.StatusCode == e {
return true, err
}
}
}
// Allow retry if request times out. Adapted from
// http://stackoverflow.com/questions/23494950/specifically-check-for-timeout-error
switch err := err.(type) {
case *url.Error:
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
return true, err
}
case net.Error:
if err.Timeout() {
return true, err
}
}
return false, err
}
// NewFs constructs an FsAcd from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
root = parsePath(root)
oAuthClient, err := oauthutil.NewClient(name, acdConfig)
if err != nil {
log.Fatalf("Failed to configure amazon cloud drive: %v", err)
}
c := acd.NewClient(oAuthClient)
c.UserAgent = fs.UserAgent
f := &FsAcd{
name: name,
root: root,
c: c,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
}
// Update endpoints
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
_, resp, err = f.c.Account.GetEndpoints()
return shouldRetry(resp, err)
})
if err != nil {
return nil, fmt.Errorf("Failed to get endpoints: %v", err)
}
// Get rootID
var rootInfo *acd.Folder
err = f.pacer.Call(func() (bool, error) {
rootInfo, resp, err = f.c.Nodes.GetRoot()
return shouldRetry(resp, err)
})
if err != nil || rootInfo.Id == nil {
return nil, fmt.Errorf("Failed to get root: %v", err)
}
f.dirCache = dircache.New(root, *rootInfo.Id, f)
// Find the current root
err = f.dirCache.FindRoot(false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
newF := *f
newF.dirCache = dircache.New(newRoot, *rootInfo.Id, &newF)
newF.root = newRoot
// Make new Fs which is the parent
err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
obj := newF.newFsObjectWithInfo(remote, nil)
if obj == nil {
// File doesn't exist so return old f
return f, nil
}
// return a Fs Limited to this object
return fs.NewLimited(&newF, obj), nil
}
return f, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsAcd) newFsObjectWithInfo(remote string, info *acd.Node) fs.Object {
o := &FsObjectAcd{
acd: f,
remote: remote,
}
if info != nil {
// Set info but not meta
o.info = info
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
// logged already FsDebug("Failed to read info: %s", err)
return nil
}
}
return o
}
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsAcd) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *FsAcd) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
//fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response
var subFolder *acd.Folder
err = f.pacer.Call(func() (bool, error) {
subFolder, resp, err = folder.GetFolder(leaf)
return shouldRetry(resp, err)
})
if err != nil {
if err == acd.ErrorNodeNotFound {
//fs.Debug(f, "...Not found")
return "", false, nil
}
//fs.Debug(f, "...Error %v", err)
return "", false, err
}
if subFolder.Status != nil && *subFolder.Status != statusAvailable {
fs.Debug(f, "Ignoring folder %q in state %q", *subFolder.Status)
time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
return "", false, nil
}
//fs.Debug(f, "...Found(%q, %v)", *subFolder.Id, leaf)
return *subFolder.Id, true, nil
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *FsAcd) CreateDir(pathID, leaf string) (newID string, err error) {
//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response
var info *acd.Folder
err = f.pacer.Call(func() (bool, error) {
info, resp, err = folder.CreateFolder(leaf)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
return "", err
}
//fmt.Printf("...Id %q\n", *info.Id)
return *info.Id, nil
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*acd.Node) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *FsAcd) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
query := "parents:" + dirID
if directoriesOnly {
query += " AND kind:" + folderKind
} else if filesOnly {
query += " AND kind:" + fileKind
} else {
// FIXME none of these work
//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
}
opts := acd.NodeListOptions{
Filters: query,
}
var nodes []*acd.Node
//var resp *http.Response
OUTER:
for {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
nodes, resp, err = f.c.Nodes.GetNodes(&opts)
return shouldRetry(resp, err)
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list files: %v", err)
break
}
if nodes == nil {
break
}
for _, node := range nodes {
if node.Name != nil && node.Id != nil && node.Kind != nil && node.Status != nil {
// Ignore nodes if not AVAILABLE
if *node.Status != statusAvailable {
continue
}
if fn(node) {
found = true
break OUTER
}
}
}
}
return
}
// Path should be directory path either "" or "path/"
//
// List the directory using a recursive list from the root
//
// This fetches the minimum amount of stuff but does more API calls
// which makes it slow
func (f *FsAcd) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
var subError error
// Make the API request
var wg sync.WaitGroup
_, err := f.listAll(dirID, "", false, false, func(node *acd.Node) bool {
// Recurse on directories
switch *node.Kind {
case folderKind:
wg.Add(1)
folder := path + *node.Name + "/"
fs.Debug(f, "Reading %s", folder)
go func() {
defer wg.Done()
err := f.listDirRecursive(*node.Id, folder, out)
if err != nil {
subError = err
fs.ErrorLog(f, "Error reading %s:%s", folder, err)
}
}()
return false
case fileKind:
if fs := f.newFsObjectWithInfo(path+*node.Name, node); fs != nil {
out <- fs
}
default:
// ignore ASSET etc
}
return false
})
wg.Wait()
fs.Debug(f, "Finished reading %s", path)
if err != nil {
return err
}
if subError != nil {
return subError
}
return nil
}
// List walks the path returning a channel of FsObjects
func (f *FsAcd) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
defer close(out)
err := f.dirCache.FindRoot(false)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
err = f.listDirRecursive(f.dirCache.RootID(), "", out)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "List failed: %s", err)
}
}
}()
return out
}
// ListDir lists the directories
func (f *FsAcd) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
err := f.dirCache.FindRoot(false)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
_, err := f.listAll(f.dirCache.RootID(), "", true, false, func(item *acd.Node) bool {
dir := &fs.Dir{
Name: *item.Name,
Bytes: -1,
Count: -1,
}
dir.When, _ = time.Parse(timeFormat, *item.ModifiedDate)
out <- dir
return false
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "ListDir failed: %s", err)
}
}
}()
return out
}
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsAcd) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
o := &FsObjectAcd{
acd: f,
remote: remote,
}
leaf, directoryID, err := f.dirCache.FindPath(remote, true)
if err != nil {
return nil, err
}
folder := acd.FolderFromId(directoryID, o.acd.c.Nodes)
var info *acd.File
var resp *http.Response
err = f.pacer.CallNoRetry(func() (bool, error) {
if size != 0 {
info, resp, err = folder.Put(in, leaf)
} else {
info, resp, err = folder.PutSized(in, size, leaf)
}
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
o.info = info.Node
return o, nil
}
// Mkdir creates the container if it doesn't exist
func (f *FsAcd) Mkdir() error {
return f.dirCache.FindRoot(true)
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *FsAcd) purgeCheck(check bool) error {
if f.root == "" {
return fmt.Errorf("Can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(false)
if err != nil {
return err
}
rootID := dc.RootID()
if check {
// check directory is empty
empty := true
_, err := f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
switch *node.Kind {
case folderKind:
empty = false
return true
case fileKind:
empty = false
return true
default:
fs.Debug("Found ASSET %s", *node.Id)
}
return false
})
if err != nil {
return err
}
if !empty {
return fmt.Errorf("Directory not empty")
}
}
node := acd.NodeFromId(rootID, f.c.Nodes)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = node.Trash()
return shouldRetry(resp, err)
})
if err != nil {
return err
}
f.dirCache.ResetRoot()
if err != nil {
return err
}
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *FsAcd) Rmdir() error {
return f.purgeCheck(true)
}
// Precision return the precision of this Fs
func (f *FsAcd) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
//func (f *FsAcd) Copy(src fs.Object, remote string) (fs.Object, error) {
// srcObj, ok := src.(*FsObjectAcd)
// if !ok {
// fs.Debug(src, "Can't copy - not same remote type")
// return nil, fs.ErrorCantCopy
// }
// srcFs := srcObj.acd
// _, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
// if err != nil {
// return nil, err
// }
// return f.NewFsObject(remote), nil
//}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsAcd) Purge() error {
return f.purgeCheck(false)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *FsObjectAcd) Fs() fs.Fs {
return o.acd
}
// Return a string version
func (o *FsObjectAcd) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *FsObjectAcd) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectAcd) Md5sum() (string, error) {
if o.info.ContentProperties.Md5 != nil {
return *o.info.ContentProperties.Md5, nil
}
return "", nil
}
// Size returns the size of an object in bytes
func (o *FsObjectAcd) Size() int64 {
return int64(*o.info.ContentProperties.Size)
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *FsObjectAcd) readMetaData() (err error) {
if o.info != nil {
return nil
}
leaf, directoryID, err := o.acd.dirCache.FindPath(o.remote, false)
if err != nil {
return err
}
folder := acd.FolderFromId(directoryID, o.acd.c.Nodes)
var resp *http.Response
var info *acd.File
err = o.acd.pacer.Call(func() (bool, error) {
info, resp, err = folder.GetFile(leaf)
return shouldRetry(resp, err)
})
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
}
o.info = info.Node
return nil
}
// ModTime returns the modification time of the object
//
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectAcd) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %s", err)
return time.Now()
}
return modTime
}
// SetModTime sets the modification time of the local fs object
func (o *FsObjectAcd) SetModTime(modTime time.Time) {
// FIXME not implemented
return
}
// Storable returns a boolean showing whether this object is storable
func (o *FsObjectAcd) Storable() bool {
return true
}
// Open an object for read
func (o *FsObjectAcd) Open() (in io.ReadCloser, err error) {
file := acd.File{Node: o.info}
var resp *http.Response
err = o.acd.pacer.Call(func() (bool, error) {
in, resp, err = file.Open()
return shouldRetry(resp, err)
})
return in, err
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectAcd) Update(in io.Reader, modTime time.Time, size int64) error {
file := acd.File{Node: o.info}
var info *acd.File
var resp *http.Response
var err error
err = o.acd.pacer.CallNoRetry(func() (bool, error) {
if size != 0 {
info, resp, err = file.OverwriteSized(in, size)
} else {
info, resp, err = file.Overwrite(in)
}
return shouldRetry(resp, err)
})
if err != nil {
return err
}
o.info = info.Node
return nil
}
// Remove an object
func (o *FsObjectAcd) Remove() error {
var resp *http.Response
var err error
err = o.acd.pacer.Call(func() (bool, error) {
resp, err = o.info.Trash()
return shouldRetry(resp, err)
})
return err
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*FsAcd)(nil)
_ fs.Purger = (*FsAcd)(nil)
// _ fs.Copier = (*FsAcd)(nil)
// _ fs.Mover = (*FsAcd)(nil)
// _ fs.DirMover = (*FsAcd)(nil)
_ fs.Object = (*FsObjectAcd)(nil)
)

amazonclouddrive/amazonclouddrive_test.go

@@ -1,56 +0,0 @@
// Test AmazonCloudDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package amazonclouddrive_test
import (
"testing"
"github.com/ncw/rclone/amazonclouddrive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func init() {
fstests.NilObject = fs.Object((*amazonclouddrive.FsObjectAcd)(nil))
fstests.RemoteName = "TestAmazonCloudDrive:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

cross-compile

@@ -3,7 +3,7 @@
set -e
# This uses gox from https://github.com/mitchellh/gox
# Make sure you've run gox -build-toolchain - not required for go >= 1.5
# Make sure you've run gox -build-toolchain
if [ "$1" == "" ]; then
echo "Syntax: $0 Version"
@@ -13,9 +13,7 @@ VERSION="$1"
rm -rf build
gox -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}" -os "darwin linux freebsd openbsd windows netbsd plan9 solaris"
# Not implemented yet: nacl dragonfly android
# gox -osarch-list for definitive list
gox -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}"
mv build/rclone-${VERSION}-darwin-amd64 build/rclone-${VERSION}-osx-amd64
mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
@@ -23,12 +21,10 @@ mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
cd build
for d in `ls`; do
cp -a ../MANUAL.txt $d/README.txt
cp -a ../MANUAL.html $d/README.html
cp -a ../README.txt $d/
cp -a ../README.html $d/
cp -a ../rclone.1 $d/
zip -r9 $d.zip $d
d_current=${d/-${VERSION}/-current}
ln $d.zip $d_current.zip
rm -rf $d
done

dircache/dircache.go

@@ -1,285 +0,0 @@
// Package dircache provides a simple cache for caching directory to path lookups
package dircache
// _methods are called without the lock
import (
"fmt"
"log"
"strings"
"sync"
)
// DirCache caches paths to directory IDs and vice versa
type DirCache struct {
mu sync.RWMutex
cache map[string]string
invCache map[string]string
fs DirCacher // Interface to find and make stuff
trueRootID string // ID of the absolute root
root string // the path we are working on
rootID string // ID of the root directory
rootParentID string // ID of the root's parent directory
foundRoot bool // Whether we have found the root or not
}
// DirCacher describes an interface for doing the low level directory work
type DirCacher interface {
FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error)
CreateDir(pathID, leaf string) (newID string, err error)
}
// New makes a DirCache
//
// The cache is safe for concurrent use
func New(root string, trueRootID string, fs DirCacher) *DirCache {
d := &DirCache{
trueRootID: trueRootID,
root: root,
fs: fs,
}
d.Flush()
d.ResetRoot()
return d
}
// _get an ID given a path - without lock
func (dc *DirCache) _get(path string) (id string, ok bool) {
id, ok = dc.cache[path]
return
}
// Get an ID given a path
func (dc *DirCache) Get(path string) (id string, ok bool) {
dc.mu.RLock()
id, ok = dc._get(path)
dc.mu.RUnlock()
return
}
// GetInv gets a path given an ID
func (dc *DirCache) GetInv(id string) (path string, ok bool) {
dc.mu.RLock()
path, ok = dc.invCache[id]
dc.mu.RUnlock()
return
}
// _put a path, id into the map without lock
func (dc *DirCache) _put(path, id string) {
dc.cache[path] = id
dc.invCache[id] = path
}
// Put a path, id into the map
func (dc *DirCache) Put(path, id string) {
dc.mu.Lock()
dc._put(path, id)
dc.mu.Unlock()
}
// _flush the map of all data without lock
func (dc *DirCache) _flush() {
dc.cache = make(map[string]string)
dc.invCache = make(map[string]string)
}
// Flush the map of all data
func (dc *DirCache) Flush() {
dc.mu.Lock()
dc._flush()
dc.mu.Unlock()
}
// SplitPath splits a path into directory, leaf
//
// Path shouldn't start or end with a /
//
// If there are no slashes then directory will be "" and leaf = path
func SplitPath(path string) (directory, leaf string) {
lastSlash := strings.LastIndex(path, "/")
if lastSlash >= 0 {
directory = path[:lastSlash]
leaf = path[lastSlash+1:]
} else {
directory = ""
leaf = path
}
return
}
// FindDir finds the directory passed in returning the directory ID
// starting from pathID
//
// Path shouldn't start or end with a /
//
// If create is set it will make the directory if not found
//
// Algorithm:
// Look in the cache for the path, if found return the pathID
// If not found strip the last path off the path and recurse
// Now have a parent directory id, so look in the parent for self and return it
func (dc *DirCache) FindDir(path string, create bool) (pathID string, err error) {
dc.mu.Lock() // _findDir may write to the cache via _put, so take the write lock
defer dc.mu.Unlock()
return dc._findDir(path, create)
}
// Look for the root and in the cache - safe to call without the mu
func (dc *DirCache) _findDirInCache(path string) string {
// fmt.Println("Finding",path,"create",create,"cache",cache)
// If it is the root, then return it
if path == "" {
// fmt.Println("Root")
return dc.rootID
}
// If it is in the cache then return it
pathID, ok := dc._get(path)
if ok {
// fmt.Println("Cache hit on", path)
return pathID
}
return ""
}
// Unlocked findDir - must have mu
func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error) {
// if !dc.foundRoot {
// return "", fmt.Errorf("FindDir called before FindRoot")
// }
pathID = dc._findDirInCache(path)
if pathID != "" {
return pathID, nil
}
// Split the path into directory, leaf
directory, leaf := SplitPath(path)
// Recurse and find pathID for parent directory
parentPathID, err := dc._findDir(directory, create)
if err != nil {
return "", err
}
// Find the leaf in parentPathID
pathID, found, err := dc.fs.FindLeaf(parentPathID, leaf)
if err != nil {
return "", err
}
// If not found create the directory if required or return an error
if !found {
if create {
pathID, err = dc.fs.CreateDir(parentPathID, leaf)
if err != nil {
return "", fmt.Errorf("Failed to make directory: %v", err)
}
} else {
return "", fmt.Errorf("Couldn't find directory: %q", path)
}
}
// Store the leaf directory in the cache
dc._put(path, pathID)
// fmt.Println("Dir", path, "is", pathID)
return pathID, nil
}
// FindPath finds the leaf and directoryID from a path
//
// If create is set parent directories will be created if they don't exist
func (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string, err error) {
dc.mu.Lock()
defer dc.mu.Unlock()
directory, leaf := SplitPath(path)
directoryID, err = dc._findDir(directory, create)
if err != nil {
if create {
err = fmt.Errorf("Couldn't find or make directory %q: %s", directory, err)
} else {
err = fmt.Errorf("Couldn't find directory %q: %s", directory, err)
}
}
return
}
// FindRoot finds the root directory if not already found
//
// Resets the root directory
//
// If create is set it will make the directory if not found
func (dc *DirCache) FindRoot(create bool) error {
dc.mu.Lock()
defer dc.mu.Unlock()
if dc.foundRoot {
return nil
}
dc.foundRoot = true
rootID, err := dc._findDir(dc.root, create)
if err != nil {
dc.foundRoot = false
return err
}
dc.rootID = rootID
// Find the parent of the root while we still have the root
// directory tree cached
rootParentPath, _ := SplitPath(dc.root)
dc.rootParentID, _ = dc._get(rootParentPath)
// Reset the tree based on dc.root
dc._flush()
// Put the root directory in
dc._put("", dc.rootID)
return nil
}
// RootID returns the ID of the root directory
//
// This should be called after FindRoot
func (dc *DirCache) RootID() string {
dc.mu.Lock()
defer dc.mu.Unlock()
if !dc.foundRoot {
log.Fatalf("Internal Error: RootID() called before FindRoot")
}
return dc.rootID
}
// RootParentID returns the ID of the parent of the root directory
//
// This should be called after FindRoot
func (dc *DirCache) RootParentID() (string, error) {
dc.mu.Lock()
defer dc.mu.Unlock()
if !dc.foundRoot {
return "", fmt.Errorf("Internal Error: RootID() called before FindRoot")
}
if dc.rootParentID == "" {
return "", fmt.Errorf("Internal Error: Didn't find rootParentID")
}
if dc.rootID == dc.trueRootID {
return "", fmt.Errorf("Is root directory")
}
return dc.rootParentID, nil
}
// ResetRoot resets the root directory to the absolute root and clears
// the DirCache
func (dc *DirCache) ResetRoot() {
dc.mu.Lock()
defer dc.mu.Unlock()
dc.foundRoot = false
dc._flush()
// Put the true root in
dc.rootID = dc.trueRootID
// Put the root directory in
dc._put("", dc.rootID)
}

docs/content/about.md

@@ -1,8 +1,8 @@
---
title: "Rclone"
description: "rclone syncs files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox, Google Cloud Storage and Amazon Cloud Drive."
description: "rclone syncs files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox and Google Cloud Storage."
type: page
date: "2015-09-06"
date: "2014-07-17"
groups: ["about"]
---
@@ -18,7 +18,6 @@ Rclone is a command line program to sync files and directories to and from
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* Amazon Cloud Drive
* The local filesystem
Features

docs/content/amazonclouddrive.md

@@ -1,104 +0,0 @@
---
title: "Amazon Cloud Drive"
description: "Rclone docs for Amazon Cloud Drive"
date: "2015-09-06"
---
<i class="fa fa-google"></i> Amazon Cloud Drive
-----------------------------------------
Paths are specified as `remote:path`
Paths may be as deep as required, eg `remote:directory/subdirectory`.
The initial setup for Amazon cloud drive involves getting a token from
Amazon which you need to do in your browser. `rclone config` walks
you through it.
Here is an example of how to make a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) drive
3) dropbox
4) google cloud storage
5) local
6) s3
7) swift
type> 1
Amazon Application Client Id - leave blank normally.
client_id>
Amazon Application Client Secret - leave blank normally.
client_secret>
Remote config
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
client_id =
client_secret =
token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxx","token_type":"bearer","refresh_token":"xxxxxxxxxxxxxxxxxx","expiry":"2015-09-06T16:07:39.658438471+01:00"}
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
Note that rclone runs a webserver on your local machine to collect the
token as returned from Amazon. This only runs from the moment it
opens your browser to the moment you get back the verification
code. This is on `http://127.0.0.1:53682/` and it may require
you to unblock it temporarily if you are running a host firewall.
Once configured you can then use `rclone` like this,
List directories in top level of your Amazon cloud drive
rclone lsd remote:
List all the files in your Amazon cloud drive
rclone ls remote:
To copy a local directory to an Amazon cloud drive directory called backup
rclone copy /home/source remote:backup
### Modified time and MD5SUMs ###
Amazon cloud drive doesn't allow modification times to be changed via
the API so these won't be accurate or used for syncing.
It does store MD5SUMs so for a more accurate sync, you can use the
`--checksum` flag.
### Deleting files ###
Any files you delete with rclone will end up in the trash. Amazon
don't provide an API to permanently delete files, nor to empty the
trash, so you will have to do that with one of Amazon's apps or via
the Amazon cloud drive website.
### Limitations ###
Note that Amazon cloud drive is case insensitive so you can't have a
file called "Hello.doc" and one called "hello.doc".
Amazon cloud drive has rate limiting so you may notice errors in the
sync (429 errors). rclone will automatically retry the sync up to 3
times by default (see `--retries` flag) which should hopefully work
around this problem.

docs/content/authors.md

@@ -1,20 +0,0 @@
---
title: "Authors"
description: "Rclone Authors and Contributors"
date: "2015-09-28"
---
Authors
-------
* Nick Craig-Wood <nick@craig-wood.com>
Contributors
------------
* Alex Couper <amcouper@gmail.com>
* Leonid Shalupov <leonid@shalupov.com>
* Shimon Doodkin <helpmepro1@gmail.com>
* Colin Nicholson <colin@colinn.com>
* Klaus Post <klauspost@gmail.com>
* Sergey Tolmachev <tolsi.ru@gmail.com>

docs/content/bugs.md

@@ -1,31 +0,0 @@
---
title: "Bugs"
description: "Rclone Bugs and Limitations"
date: "2014-06-16"
---
Bugs and Limitations
--------------------
### Empty directories are left behind / not created ###
With remotes that have a concept of directory, eg Local and Drive,
empty directories may be left behind, or not created when one was
expected.
This is because rclone doesn't have a concept of a directory - it only
works on objects. Most of the object storage systems can't actually
store a directory so there is nowhere for rclone to store anything
about directories.
You can work round this to some extent with the `purge` command which
will delete everything under the path, **including** empty directories.
This may be fixed at some point in
[Issue #100](https://github.com/ncw/rclone/issues/100)
### Directory timestamps aren't preserved ###
For the same reason as the above, rclone doesn't have a concept of a
directory - it only works on objects, therefore it can't preserve the
timestamps of directories.

docs/content/changelog.md

@@ -1,192 +0,0 @@
---
title: "Documentation"
description: "Rclone Changelog"
date: "2015-10-03"
---
Changelog
---------
* v1.23 - 2015-10-03
* New features
* Implement `rclone size` for measuring remotes
* Fixes
* Fix headless config for drive and gcs
* Tell the user they should try again if the webserver method failed
* Improve output of `--dump-headers`
* S3
* Allow anonymous access to public buckets
* Swift
* Stop chunked operations logging "Failed to read info: Object Not Found"
* Use Content-Length on uploads for extra reliability
* v1.22 - 2015-09-28
* Implement rsync like include and exclude flags
* swift
* Support files > 5GB - thanks Sergey Tolmachev
* v1.21 - 2015-09-22
* New features
* Display individual transfer progress
* Make lsl output times in localtime
* Fixes
* Fix allowing user to override credentials again in Drive, GCS and ACD
* Amazon Cloud Drive
* Implement compliant pacing scheme
* Google Drive
* Make directory reads concurrent for increased speed.
* v1.20 - 2015-09-15
* New features
* Amazon Cloud Drive support
* Oauth support redone - fix many bugs and improve usability
* Use "golang.org/x/oauth2" as oauth libary of choice
* Improve oauth usability for smoother initial signup
* drive, googlecloudstorage: optionally use auto config for the oauth token
* Implement --dump-headers and --dump-bodies debug flags
* Show multiple matched commands if abbreviation too short
* Implement server side move where possible
* local
* Always use UNC paths internally on Windows - fixes a lot of bugs
* dropbox
* force use of our custom transport which makes timeouts work
* Thanks to Klaus Post for lots of help with this release
* v1.19 - 2015-08-28
* New features
* Server side copies for s3/swift/drive/dropbox/gcs
* Move command - uses server side copies if it can
* Implement --retries flag - tries 3 times by default
* Build for plan9/amd64 and solaris/amd64 too
* Fixes
* Make a current version download with a fixed URL for scripting
* Ignore rmdir in limited fs rather than throwing error
* dropbox
* Increase chunk size to improve upload speeds massively
* Issue an error message when trying to upload bad file name
* v1.18 - 2015-08-17
* drive
* Add `--drive-use-trash` flag so rclone trashes instead of deletes
* Add "Forbidden to download" message for files with no downloadURL
* dropbox
* Remove datastore
* This was deprecated and it caused a lot of problems
* Modification times and MD5SUMs no longer stored
* Fix uploading files > 2GB
* s3
* use official AWS SDK from github.com/aws/aws-sdk-go
* **NB** will most likely require you to delete and recreate remote
* enable multipart upload which enables files > 5GB
* tested with Ceph / RadosGW / S3 emulation
* many thanks to Sam Liston and Brian Haymore at the [Utah
Center for High Performance Computing](https://www.chpc.utah.edu/) for a Ceph test account
* misc
* Show errors when reading the config file
* Do not print stats in quiet mode - thanks Leonid Shalupov
* Add FAQ
* Fix created directories not obeying umask
* Linux installation instructions - thanks Shimon Doodkin
* v1.17 - 2015-06-14
* dropbox: fix case insensitivity issues - thanks Leonid Shalupov
* v1.16 - 2015-06-09
* Fix uploading big files which was causing timeouts or panics
* Don't check md5sum after download with --size-only
* v1.15 - 2015-06-06
* Add --checksum flag to only discard transfers by MD5SUM - thanks Alex Couper
* Implement --size-only flag to sync on size not checksum & modtime
* Expand docs and remove duplicated information
* Document rclone's limitations with directories
* dropbox: update docs about case insensitivity
* v1.14 - 2015-05-21
* local: fix encoding of non utf-8 file names - fixes a duplicate file problem
* drive: docs about rate limiting
* google cloud storage: Fix compile after API change in "google.golang.org/api/storage/v1"
* v1.13 - 2015-05-10
* Revise documentation (especially sync)
* Implement --timeout and --conntimeout
* s3: ignore etags from multipart uploads which aren't md5sums
* v1.12 - 2015-03-15
* drive: Use chunked upload for files above a certain size
* drive: add --drive-chunk-size and --drive-upload-cutoff parameters
* drive: switch to insert from update when a failed copy deletes the upload
* core: Log duplicate files if they are detected
* v1.11 - 2015-03-04
* swift: add region parameter
* drive: fix crash on failed to update remote mtime
* In remote paths, change native directory separators to /
* Add synchronization to ls/lsl/lsd output to stop corruptions
* Ensure all stats/log messages to go stderr
* Add --log-file flag to log everything (including panics) to file
* Make it possible to disable stats printing with --stats=0
* Implement --bwlimit to limit data transfer bandwidth
* v1.10 - 2015-02-12
* s3: list an unlimited number of items
* Fix getting stuck in the configurator
* v1.09 - 2015-02-07
* windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
* local: Fix directory separators on Windows
* drive: fix rate limit exceeded errors
* v1.08 - 2015-02-04
* drive: fix subdirectory listing to not list entire drive
* drive: Fix SetModTime
* dropbox: adapt code to recent library changes
* v1.07 - 2014-12-23
* google cloud storage: fix memory leak
* v1.06 - 2014-12-12
* Fix "Couldn't find home directory" on OSX
* swift: Add tenant parameter
* Use new location of Google API packages
* v1.05 - 2014-08-09
* Improved tests and consequently lots of minor fixes
* core: Fix race detected by go race detector
* core: Fixes after running errcheck
* drive: reset root directory on Rmdir and Purge
* fs: Document that Purger returns error on empty directory, test and fix
* google cloud storage: fix ListDir on subdirectory
* google cloud storage: re-read metadata in SetModTime
* s3: make reading metadata more reliable to work around eventual consistency problems
* s3: strip trailing / from ListDir()
* swift: return directories without / in ListDir
* v1.04 - 2014-07-21
* google cloud storage: Fix crash on Update
* v1.03 - 2014-07-20
* swift, s3, dropbox: fix updated files being marked as corrupted
* Make compile with go 1.1 again
* v1.02 - 2014-07-19
* Implement Dropbox remote
* Implement Google Cloud Storage remote
* Verify Md5sums and Sizes after copies
* Remove times from "ls" command - lists sizes only
* Add "lsl" - lists times and sizes
* Add "md5sum" command
* v1.01 - 2014-07-04
* drive: fix transfer of big files using up lots of memory
* v1.00 - 2014-07-03
* drive: fix whole second dates
* v0.99 - 2014-06-26
* Fix --dry-run not working
* Make compatible with go 1.1
* v0.98 - 2014-05-30
* s3: Treat missing Content-Length as 0 for some ceph installations
* rclonetest: add file with a space in
* v0.97 - 2014-05-05
* Implement copying of single files
* s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
* drive: Fix multiple files of same name being created
* drive: Use o.Update and fs.Put to optimise transfers
* Add version number, -V and --version
* v0.95 - 2014-03-28
* rclone.org: website, docs and graphics
* drive: fix path parsing
* v0.94 - 2014-03-27
* Change remote format one last time
* GNU style flags
* v0.93 - 2014-03-16
* drive: store token in config file
* cross compile other versions
* set strict permissions on config file
* v0.92 - 2014-03-15
* Config fixes and --config option
* v0.91 - 2014-03-15
* Make config file
* v0.90 - 2013-06-27
* Project named rclone
* v0.00 - 2012-11-18
* Project started

docs/content/contact.md

@@ -5,17 +5,8 @@ date: "2014-04-26"
---
Contact the rclone project
--------------------------
The project website is at:
* https://github.com/ncw/rclone
There you can file bug reports, ask for help or contribute pull
requests.
See also
* [Github project page for source, reporting bugs and pull requests](http://github.com/ncw/rclone)
* <a href="https://google.com/+RcloneOrg" rel="publisher">Google+ page for general comments</a></li>
Or email [Nick Craig-Wood](mailto:nick@craig-wood.com)

docs/content/docs.md

@@ -1,9 +1,22 @@
---
title: "Documentation"
description: "Rclone Usage"
date: "2015-06-06"
description: "Rclone Documentation"
date: "2014-07-17"
---
Install
-------
Rclone is a Go program and comes as a single binary file.
[Download](/downloads/) the relevant binary.
Or alternatively if you have Go installed use
go get github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`.
Configure
---------
@@ -17,13 +30,11 @@ option:
rclone config
See the following for detailed instructions for
See below for detailed instructions for
* [Google drive](/drive/)
* [Amazon S3](/s3/)
* [Swift / Rackspace Cloudfiles / Memset Memstore](/swift/)
* [Dropbox](/dropbox/)
* [Google Cloud Storage](/googlecloudstorage/)
* [Local filesystem](/local/)
Usage
@@ -44,271 +55,104 @@ You can define as many storage paths as you like in the config file.
Subcommands
-----------
### rclone copy source:path dest:path ###
rclone copy source:path dest:path
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.
### rclone sync source:path dest:path ###
rclone sync source:path dest:path
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the `--dry-run` flag.
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the -dry-run flag.
### rclone ls remote:path ###
rclone ls [remote:path]
List all the objects in the path with size and path.
List all the objects in the path with sizes.
### rclone lsd remote:path ###
rclone lsl [remote:path]
List all directories/containers/buckets in the path.
List all the objects in the path with sizes and timestamps.
### rclone lsl remote:path ###
rclone lsd [remote:path]
List all the objects in the path with modification time,
size and path.
List all directories/objects/buckets in the path.
### rclone md5sum remote:path ###
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.
### rclone size remote:path ###
Prints the total size of objects in remote:path and the number of
objects.
### rclone mkdir remote:path ###
rclone mkdir remote:path
Make the path if it doesn't already exist
### rclone rmdir remote:path ###
rclone rmdir remote:path
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.
### rclone purge remote:path ###
rclone purge remote:path
Remove the path and all of its contents.
### rclone check source:path dest:path ###
rclone check source:path dest:path
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
### rclone config ###
rclone md5sum remote:path
Enter an interactive configuration session.
Produces an md5sum file for all the objects in the path. This is in
the same format as the standard md5sum tool produces.
General options:
### rclone help ###
```
--checkers=8: Number of checkers to run in parallel.
--transfers=4: Number of file transfers to run in parallel.
--config="~/.rclone.conf": Config file.
-n, --dry-run=false: Do a trial run with no permanent changes
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats
-v, --verbose=false: Print lots more stuff
```
Prints help on rclone commands and options.
Developer options:
Server Side Copy
----------------
```
--cpuprofile="": Write cpu profile to file
```
Drive, S3, Dropbox, Swift and Google Cloud Storage support server side
copy.
This means if you want to copy one folder to another then rclone won't
download all the files and re-upload them; it will instruct the server
to copy them in place.
Eg
rclone copy s3:oldbucket s3:newbucket
Will copy the contents of `oldbucket` to `newbucket` without
downloading and re-uploading.
Remotes which don't support server side copy (eg local) **will**
download and re-upload in this case.
Server side copies are used with `sync` and `copy` and will be
identified in the log when using the `-v` flag.
Server side copies will only be attempted if the remote names are the
same.
This can be used when scripting to make aged backups efficiently, eg
rclone sync remote:current-backup remote:previous-backup
rclone sync /path/to/files remote:current-backup
Options
License
-------
Rclone has a number of options to control its behaviour.
This is free software under the terms of the MIT license (check the
COPYING file included in this package).
Options which use TIME use the go time parser. A duration string is a
possibly signed sequence of decimal numbers, each with optional
fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid
time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
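These are in fact the strings accepted by Go's standard `time.ParseDuration`; a minimal sketch of what the parser accepts:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	// time.ParseDuration is the standard library primitive behind
	// this format, so these all parse as described above.
	for _, s := range []string{"300ms", "-1.5h", "2h45m"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Println(s, "->", d)
	}
}
```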
Bugs
----
Options which use SIZE use kByte by default. However a suffix of `k`
for kBytes, `M` for MBytes and `G` for GBytes may be used. These are
the binary units, eg 2**10, 2**20, 2**30 respectively.
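As an illustration of how such a value decodes, here is a hypothetical `parseSize` helper; this is a sketch of the rule above, not rclone's actual code:

```
package main

import (
	"fmt"
	"strconv"
)

// parseSize is a hypothetical sketch of decoding a SIZE value: a bare
// number means kBytes, and the binary suffixes k, M and G select
// 2**10, 2**20 and 2**30 bytes respectively.
func parseSize(s string) (int64, error) {
	if s == "" {
		return 0, fmt.Errorf("empty size")
	}
	multiplier := int64(1 << 10) // bare numbers are kBytes
	switch s[len(s)-1] {
	case 'k':
		multiplier, s = 1<<10, s[:len(s)-1]
	case 'M':
		multiplier, s = 1<<20, s[:len(s)-1]
	case 'G':
		multiplier, s = 1<<30, s[:len(s)-1]
	}
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, err
	}
	return n * multiplier, nil
}

func main() {
	for _, s := range []string{"10", "10M", "1G"} {
		n, _ := parseSize(s)
		fmt.Println(s, "=", n, "bytes")
	}
}
```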
* Doesn't sync individual files yet, only directories.
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work
### --bwlimit=SIZE ###
Contact and support
-------------------
Bandwidth limit in kBytes/s, or use suffix k|M|G. The default is `0`
which means to not limit bandwidth.
The project website is at:
For example to limit bandwidth usage to 10 MBytes/s use `--bwlimit 10M`
* https://github.com/ncw/rclone
This only limits the bandwidth of the data transfer; it doesn't limit
the bandwidth of the directory listings etc.
There you can file bug reports, ask for help or contribute patches.
### --checkers=N ###
Authors
-------
The number of checkers to run in parallel. Checkers do the equality
checking of files during a sync. For some storage systems (eg s3,
swift, dropbox) this can take a significant amount of time so they are
run in parallel.
* Nick Craig-Wood <nick@craig-wood.com>
The default is to run 8 checkers in parallel.
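A minimal sketch of the idea, with hypothetical check logic standing in for the real equality test:

```
package main

import (
	"fmt"
	"sync"
)

// N checker workers consume paths from a channel and check them
// concurrently. Illustrative only, not rclone's code.
func main() {
	const checkers = 8
	paths := make(chan string)
	var wg sync.WaitGroup
	for i := 0; i < checkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for p := range paths {
				fmt.Println("checking", p) // equality check would go here
			}
		}()
	}
	for _, p := range []string{"file1.jpg", "file2.jpg", "dir/file3.jpg"} {
		paths <- p
	}
	close(paths)
	wg.Wait()
}
```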
Contributors
------------
### -c, --checksum ###
Normally rclone will look at modification time and size of files to
see if they are equal. If you set this flag then rclone will check
MD5SUM and size to determine if files are equal.
This is very useful when transferring between remotes which store the
MD5SUM on the object, such as swift, s3, drive and google cloud
storage.
Eg `rclone --checksum sync s3:/bucket swift:/bucket` would run much
quicker than without the `--checksum` flag.
When using this flag, rclone won't update mtimes of remote files if
they are incorrect as it would normally.
### --config=CONFIG_FILE ###
Specify the location of the rclone config file. Normally this is in
your home directory as a file called `.rclone.conf`. If you run
`rclone -h` and look at the help for the `--config` option you will
see where the default location is for you. Use this flag to override
the config location, eg `rclone --config=".myconfig" .config`.
### --contimeout=TIME ###
Set the connection timeout. This should be in go time format which
looks like `5s` for 5 seconds, `10m` for 10 minutes, or `3h30m`.
The connection timeout is the amount of time rclone will wait for a
connection to go through to a remote object storage system. It is
`1m` by default.
### -n, --dry-run ###
Do a trial run with no permanent changes. Use this in combination
with the `-v` flag to see what rclone would do without actually doing
it. Useful when setting up the `sync` command.
### --log-file=FILE ###
Log all of rclone's output to FILE. This is not active by default.
This can be useful for tracking down problems with syncs in
combination with the `-v` flag.
### --modify-window=TIME ###
When checking whether a file has been modified, this is the maximum
allowed time difference that a file can have and still be considered
equivalent.
The default is `1ns` unless this is overridden by a remote. For
example OS X only stores modification times to the nearest second so
if you are reading and writing to an OS X filing system this will be
`1s` by default.
This command line flag allows you to override that computed default.
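The comparison this implies is simple; a sketch with a hypothetical helper name, not rclone's implementation:

```
package main

import (
	"fmt"
	"time"
)

// withinModifyWindow is a hypothetical helper showing the idea: two
// modification times are treated as equal if they differ by no more
// than the modify window.
func withinModifyWindow(a, b time.Time, window time.Duration) bool {
	diff := a.Sub(b)
	if diff < 0 {
		diff = -diff
	}
	return diff <= window
}

func main() {
	a := time.Now()
	b := a.Add(500 * time.Millisecond)
	fmt.Println(withinModifyWindow(a, b, time.Second))     // true
	fmt.Println(withinModifyWindow(a, b, time.Nanosecond)) // false
}
```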
### -q, --quiet ###
Normally rclone outputs stats and a completion message. If you set
this flag it will make as little output as possible.
### --size-only ###
Normally rclone will look at modification time and size of files to
see if they are equal. If you set this flag then rclone will check
only the size.
This can be useful when transferring files from dropbox which have been
modified by the desktop sync client, which doesn't set checksums or
modification times in the same way as rclone.
When using this flag, rclone won't update mtimes of remote files if
they are incorrect as it would normally.
### --stats=TIME ###
Rclone will print stats at regular intervals to show its progress.
This sets the interval.
The default is `1m`. Use 0 to disable.
### --timeout=TIME ###
This sets the IO idle timeout. If a transfer has started but then
becomes idle for this long it is considered broken and disconnected.
The default is `5m`. Set to 0 to disable.
### --transfers=N ###
The number of file transfers to run in parallel. It can sometimes be
useful to set this to a smaller number if the remote is giving a lot
of timeouts or bigger if you have lots of bandwidth and a fast remote.
The default is to run 4 file transfers in parallel.
### -v, --verbose ###
If you set this flag, rclone will become very verbose telling you
about every file it considers and transfers.
Very useful for debugging.
### -V, --version ###
Prints the version number
Developer options
-----------------
These options are useful when developing or debugging rclone. There
are also some more remote specific options which aren't documented
here which are used for testing. These start with the remote name, eg
`--drive-test-option`.
### --cpuprofile=FILE ###
Write cpu profile to file. This can be analysed with `go tool pprof`.
Filtering
---------
For the filtering options
* `--delete-excluded`
* `--filter`
* `--filter-from`
* `--exclude`
* `--exclude-from`
* `--include`
* `--include-from`
* `--files-from`
* `--min-size`
* `--max-size`
* `--dump-filters`
See the [filtering section](/filtering/).
* Your name goes here!

View File

@@ -1,30 +0,0 @@
---
title: "Flowers for My Wife"
description: "Flowers for My Wife."
type: page
date: "2015-09-06"
---
Flowers for My Wife
===================
Rclone is a pure open source, for-love-not-money project. However I've
had requests for a donation page and coding it does take me away from
something else I love - my wonderful wife.
So if you would like to send a donation, I will use it to buy flowers
for her which will make her very happy.
<form action="https://www.paypal.com/cgi-bin/webscr" method="post" target="_top">
<input type="hidden" name="cmd" value="_s-xclick">
<input type="hidden" name="hosted_button_id" value="XQMMNUD5ZY49J">
<input type="image" src="https://www.paypalobjects.com/en_US/GB/i/btn/btn_donateCC_LG.gif" border="0" name="submit" alt="PayPal The safer, easier way to pay online.">
<img alt="" border="0" src="https://www.paypalobjects.com/en_GB/i/scr/pixel.gif" width="1" height="1">
</form>
If you would prefer to express your gratitude by promoting the
project, or helping with it, I'd be over the moon with that too!
Thanks
Nick

View File

@@ -2,73 +2,34 @@
title: "Rclone downloads"
description: "Download rclone binaries for your OS."
type: page
date: "2015-10-03"
date: "2014-07-19"
---
Rclone Download v1.23
Rclone Download v1.02
=====================
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-windows-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-osx-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.23-linux-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.02-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.23-freebsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.02-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.23-netbsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.02-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-openbsd-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-plan9-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-plan9-amd64.zip)
* Solaris
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-solaris-amd64.zip)
Downloads for scripting
=======================
If you would like to download the current version (maybe from a
script) from a URL which doesn't change then you can use these links.
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-plan9-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-plan9-amd64.zip)
* Solaris
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-solaris-amd64.zip)
Older Downloads
==============
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-plan9-386.zip)
Older downloads can be found [here](http://downloads.rclone.org/)

View File

@@ -31,44 +31,5 @@ Rclone Download VERSION
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-plan9-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-plan9-amd64.zip)
* Solaris
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-solaris-amd64.zip)
Downloads for scripting
=======================
If you would like to download the current version (maybe from a
script) from a URL which doesn't change then you can use these links.
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-plan9-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-plan9-amd64.zip)
* Solaris
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-solaris-amd64.zip)
Older Downloads
==============
Older downloads can be found [here](http://downloads.rclone.org/)

View File

@@ -1,7 +1,7 @@
---
title: "Google drive"
description: "Rclone docs for Google drive"
date: "2015-09-12"
date: "2014-04-26"
---
<i class="fa fa-google"></i> Google Drive
@@ -34,21 +34,15 @@ Choose a number from below
3) local
4) drive
type> 4
Google Application Client Id - leave blank normally.
Google Application Client Id - leave blank to use rclone's.
client_id>
Google Application Client Secret - leave blank normally.
Google Application Client Secret - leave blank to use rclone's.
client_secret>
Remote config
Use auto config?
* Say Y if not sure
* Say N if you are working on a remote or headless machine or Y didn't work
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
Go to the following link in your browser
https://accounts.google.com/o/oauth2/auth?access_type=&approval_prompt=&client_id=XXXXXXXXXXXX.apps.googleusercontent.com&redirect_uri=urn%3XXXXX%3Awg%3Aoauth%3XX.0%3Aoob&response_type=code&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&state=state
Log in, then paste the token that is returned in the browser here
Enter verification code> X/XXXXXXXXXXXXXXXXXX-XXXXXXXXX.XXXXXXXXX-XXXXX_XXXXXXX_XXXXXXX
--------------------
[remote]
client_id =
@@ -61,13 +55,6 @@ d) Delete this remote
y/e/d> y
```
Note that rclone runs a webserver on your local machine to collect the
token as returned from Google if you use auto config mode. This only
runs from the moment it opens your browser to the moment you get back
the verification code. This is on `http://127.0.0.1:53682/` and you
may need to unblock it temporarily if you are running a host
firewall, or use manual mode instead.
You can then use it like this,
List directories in top level of your drive
@@ -82,31 +69,7 @@ To copy a local directory to a drive directory called backup
rclone copy /home/source remote:backup
### Modified time ###
Modified time
-------------
Google drive stores modification times accurate to 1 ms.
### Revisions ###
Google drive stores revisions of files. When you upload a change to
an existing file to google drive using rclone it will create a new
revision of that file.
Revisions follow the standard google policy which at time of writing
was
* They are deleted after 30 days or 100 revisions (whatever comes first).
* They do not count towards a user storage quota.
### Deleting files ###
By default rclone will delete files permanently when requested. If
sending them to the trash is required instead then use the
`--drive-use-trash` flag.
### Limitations ###
Drive has quite a lot of rate limiting. This causes rclone to be
limited to transferring about 2 files per second only. Individual
files may be transferred much faster at 100s of MBytes/s but lots of
small files can take a long time.

View File

@@ -37,9 +37,9 @@ Choose a number from below
5) dropbox
6) drive
type> 5
Dropbox App Key - leave blank normally.
Dropbox App Key - leave blank to use rclone's.
app_key>
Dropbox App Secret - leave blank normally.
Dropbox App Secret - leave blank to use rclone's.
app_secret>
Remote config
Please visit:
@@ -71,18 +71,10 @@ To copy a local directory to a dropbox directory called backup
rclone copy /home/source remote:backup
### Modified time and MD5SUMs ###
Modified time
-------------
Dropbox doesn't have the capability of storing modification times or
MD5SUMs so syncs will effectively have the `--size-only` flag set.
### Limitations ###
Note that Dropbox is case sensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".
There are some file names such as `thumbs.db` which Dropbox can't
store. There is a full list of them in the ["Ignored Files" section
of this document](https://www.dropbox.com/en/help/145). Rclone will
issue an error message `File name disallowed - not uploading` if it
attempts to upload one of those file names, but the sync won't fail.
Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone". Dropbox datastores are limited
to 100,000 rows so this is the maximum number of files rclone can
manage on Dropbox.

View File

@@ -1,103 +0,0 @@
---
title: "FAQ"
description: "Rclone Frequently Asked Questions"
date: "2015-08-27"
---
Frequently Asked Questions
--------------------------
### Do all cloud storage systems support all rclone commands ###
Yes they do. All the rclone commands (eg `sync`, `copy` etc) will
work on all the remote storage systems.
### Can I copy the config from one machine to another ###
Sure! Rclone stores all of its config in a single file. If you want
to find this file, the simplest way is to run `rclone -h` and look at
the help for the `--config` flag which will tell you where it is. Eg,
```
$ rclone -h
Sync files and directories to and from local and remote object stores - v1.18.
[snip]
Options:
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
--checkers=8: Number of checkers to run in parallel.
-c, --checksum=false: Skip based on checksum & size, not mod-time & size
--config="/home/user/.rclone.conf": Config file.
[snip]
```
So in this config the config file can be found in
`/home/user/.rclone.conf`.
Just copy that to the equivalent place in the destination (run `rclone
-h` above again on the destination machine if not sure).
### Can rclone sync directly from drive to s3 ###
Rclone can sync between two remote cloud storage systems just fine.
Note that it effectively downloads the file and uploads it again, so
the node running rclone would need to have lots of bandwidth.
The syncs would be incremental (on a file by file basis).
Eg
rclone sync drive:Folder s3:bucket
### Using rclone from multiple locations at the same time ###
You can use rclone from multiple places at the same time if you choose
a different subdirectory for the output, eg
```
Server A> rclone sync /tmp/whatever remote:ServerA
Server B> rclone sync /tmp/whatever remote:ServerB
```
If you sync to the same directory then you should use rclone copy
otherwise the two rclones may delete each other's files, eg
```
Server A> rclone copy /tmp/whatever remote:Backup
Server B> rclone copy /tmp/whatever remote:Backup
```
The file names you upload from Server A and Server B should be
different in this case, otherwise some file systems (eg Drive) may
make duplicates.
### Why doesn't rclone support partial transfers / binary diffs like rsync? ###
Rclone stores each file you transfer as a native object on the remote
cloud storage system. This means that you can see the files you
upload as expected using alternative access methods (eg using the
Google Drive web interface). There is a 1:1 mapping between files on
your hard disk and objects created in the cloud storage system.
No cloud storage system (at least none I've come across yet) supports
partially uploading an object. You can't take an existing
object and change some bytes in the middle of it.
It would be possible to make a sync system which stored binary diffs
instead of whole objects like rclone does, but that would break the
1:1 mapping of files on your hard disk to objects in the remote cloud
storage system.
All the cloud storage systems support partial downloads of content, so
it would be possible to make partial downloads work. However to make
this work efficiently this would require storing a significant amount
of metadata, which breaks the desired 1:1 mapping of files to objects.
### Can rclone do bi-directional sync? ###
No, not at present. Rclone only does uni-directional sync from A ->
B. It may do in the future though since it has all the primitives; it
just requires writing the algorithm to do it.

View File

@@ -1,273 +0,0 @@
---
title: "Filtering"
description: "Filtering, includes and excludes"
date: "2015-09-27"
---
# Filtering, includes and excludes #
Rclone has a sophisticated set of include and exclude rules. Some of
these are based on patterns and some on other things like file size.
Each path as it passes through rclone is matched against the include
and exclude rules. The paths are matched without a leading `/`.
For example the files might be passed to the matching engine like this
* `file1.jpg`
* `file2.jpg`
* `directory/file3.jpg`
## Patterns ##
The patterns used to match files for inclusion or exclusion are based
on "file globs" as used by the unix shell.
If the pattern starts with a `/` then it only matches at the top level
of the directory tree. If it doesn't start with `/` then it is
matched starting at the end of the path, but it will only match a
complete path element.
file.jpg - matches "file.jpg"
- matches "directory/file.jpg"
- doesn't match "afile.jpg"
- doesn't match "directory/afile.jpg"
/file.jpg - matches "file.jpg"
- doesn't match "afile.jpg"
- doesn't match "directory/file.jpg"
A `*` matches anything but not a `/`.
*.jpg - matches "file.jpg"
- matches "directory/file.jpg"
- doesn't match "file.jpg/anotherfile.jpg"
Use `**` to match anything, including slashes.
dir/** - matches "dir/file.jpg"
- matches "dir/dir1/dir2/file.jpg"
- doesn't match "directory/file.jpg"
- doesn't match "adir/file.jpg"
A `?` matches any character except a slash `/`.
l?ss - matches "less"
- matches "lass"
- doesn't match "floss"
A `[` and `]` together make a character class, such as `[a-z]` or
`[aeiou]` or `[[:alpha:]]`. See the [go regexp
docs](https://golang.org/pkg/regexp/syntax/) for more info on these.
h[ae]llo - matches "hello"
- matches "hallo"
- doesn't match "hullo"
A `{` and `}` define a choice between elements. It should contain a
comma separated list of patterns, any of which might match. These
patterns can contain wildcards.
{one,two}_potato - matches "one_potato"
- matches "two_potato"
- doesn't match "three_potato"
- doesn't match "_potato"
Special characters can be escaped with a `\` before them.
\*.jpg - matches "*.jpg"
\\.jpg - matches "\.jpg"
\[one\].jpg - matches "[one].jpg"
### Differences between rsync and rclone patterns ###
Rclone implements bash style `{a,b,c}` glob matching which rsync doesn't.
Rclone ignores `/` at the end of a pattern.
Rclone always does a wildcard match so `\` must always escape a `\`.
## How the rules are used ##
Rclone maintains a list of include rules and exclude rules.
Each file is matched in order against the list until it finds a match.
The file is then included or excluded according to the rule type.
If the matcher falls off the bottom of the list then the path is
included.
For example given the following rules, `+` being include, `-` being
exclude,
- secret*.jpg
+ *.jpg
+ *.png
+ file2.avi
- *
This would include
* `file1.jpg`
* `file3.png`
* `file2.avi`
This would exclude
* `secret17.jpg`
* anything which isn't `*.jpg`, `*.png` or `file2.avi`
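A minimal Go sketch of this first-match-wins evaluation, using a hypothetical `rule` type with hard-coded matchers standing in for real glob patterns:

```
package main

import (
	"fmt"
	"strings"
)

// rule pairs a matcher with whether a match means include or exclude.
type rule struct {
	include bool
	match   func(path string) bool
}

// included applies the rules in order: the first rule that matches
// decides, and a path matching no rule falls off the bottom of the
// list and is included.
func included(path string, rules []rule) bool {
	for _, r := range rules {
		if r.match(path) {
			return r.include
		}
	}
	return true
}

func main() {
	// Roughly equivalent to: - secret*.jpg / + *.jpg / - *
	rules := []rule{
		{false, func(p string) bool {
			return strings.HasPrefix(p, "secret") && strings.HasSuffix(p, ".jpg")
		}},
		{true, func(p string) bool { return strings.HasSuffix(p, ".jpg") }},
		{false, func(p string) bool { return true }},
	}
	fmt.Println(included("file1.jpg", rules))    // true
	fmt.Println(included("secret17.jpg", rules)) // false
	fmt.Println(included("notes.txt", rules))    // false
}
```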
## Adding filtering rules ##
Filtering rules are added with the following command line flags.
### `--exclude` - Exclude files matching pattern ###
Add a single exclude rule with `--exclude`.
Eg `--exclude *.bak` to exclude all bak files from the sync.
### `--exclude-from` - Read exclude patterns from file ###
Add exclude rules from a file.
Prepare a file like this `exclude-file.txt`
# a sample exclude rule file
*.bak
file2.jpg
Then use as `--exclude-from exclude-file.txt`. This will sync all
files except those ending in `bak` and `file2.jpg`.
This is useful if you have a lot of rules.
### `--include` - Include files matching pattern ###
Add a single include rule with `--include`.
Eg `--include *.{png,jpg}` to include all `png` and `jpg` files in the
backup and no others.
This adds an implicit `--exclude *` at the end of the filter list.
### `--include-from` - Read include patterns from file ###
Add include rules from a file.
Prepare a file like this `include-file.txt`
# a sample include rule file
*.jpg
*.png
file2.avi
Then use as `--include-from include-file.txt`. This will sync all
`jpg`, `png` files and `file2.avi`.
This is useful if you have a lot of rules.
This adds an implicit `--exclude *` at the end of the filter list.
### `--filter` - Add a file-filtering rule ###
This can be used to add a single include or exclude rule. Include
rules start with `+ ` and exclude rules start with `- `. A special
rule called `!` can be used to clear the existing rules.
Eg `--filter "- *.bak"` to exclude all bak files from the sync.
### `--filter-from` - Read filtering patterns from a file ###
Add include/exclude rules from a file.
Prepare a file like this `filter-file.txt`
# a sample exclude rule file
- secret*.jpg
+ *.jpg
+ *.png
+ file2.avi
# exclude everything else
- *
Then use as `--filter-from filter-file.txt`. The rules are processed
in the order that they are defined.
This example will include all `jpg` and `png` files, exclude any files
matching `secret*.jpg` and include `file2.avi`. Everything else will
be excluded from the sync.
### `--files-from` - Read list of source-file names ###
This reads a list of file names from the file passed in and **only**
these files are transferred. The filtering rules are ignored
completely if you use this option.
Prepare a file like this `files-from.txt`
# comment
file1.jpg
file2.jpg
Then use as `--files-from files-from.txt`. This will only transfer
`file1.jpg` and `file2.jpg` providing they exist.
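A sketch of reading such a list (illustrative only, not rclone's implementation): blank lines and `#` comments are skipped, everything else is taken as a file name.

```
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// readFilesFrom is an illustrative sketch of reading a --files-from
// list: blank lines and lines starting with # are skipped.
func readFilesFrom(path string) ([]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var names []string
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		names = append(names, line)
	}
	return names, scanner.Err()
}

func main() {
	names, err := readFilesFrom("files-from.txt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(names) // [file1.jpg file2.jpg]
}
```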
### `--min-size` - Don't transfer any file smaller than this ###
This option controls the minimum size file which will be transferred.
This defaults to `kBytes` but a suffix of `k`, `M`, or `G` can be
used.
For example `--min-size 50k` means no files smaller than 50kByte will be
transferred.
### `--max-size` - Don't transfer any file larger than this ###
This option controls the maximum size file which will be transferred.
This defaults to `kBytes` but a suffix of `k`, `M`, or `G` can be
used.
For example `--max-size 1G` means no files larger than 1GByte will be
transferred.
### `--delete-excluded` - Delete files on dest excluded from sync ###
**Important** this flag is dangerous - use with `--dry-run` and `-v` first.
When doing `rclone sync` this will delete any files which are excluded
from the sync on the destination.
If for example you did a sync from `A` to `B` without the `--min-size 50k` flag
rclone sync A: B:
Then you repeated it like this with the `--delete-excluded` flag
rclone --min-size 50k --delete-excluded sync A: B:
This would delete all files on `B` which are less than 50 kBytes as
these are now excluded from the sync.
Always test first with `--dry-run` and `-v` before using this flag.
### `--dump-filters` - dump the filters to the output ###
This dumps the defined filters to the output as regular expressions.
Useful for debugging.
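As a rough illustration of the glob to regular expression translation, here is a hypothetical `globToRegexp` covering only `*`, `**` and `?` and anchoring the whole pattern; rclone's real translator also handles character classes, `{a,b}` alternation, `\` escapes and matching at path element boundaries:

```
package main

import (
	"fmt"
	"regexp"
)

// globToRegexp is a rough sketch, not rclone's code: * stops at /,
// ** matches across /, and ? matches any character except /.
func globToRegexp(glob string) *regexp.Regexp {
	out := ""
	for i := 0; i < len(glob); i++ {
		switch glob[i] {
		case '*':
			if i+1 < len(glob) && glob[i+1] == '*' {
				out += ".*" // ** matches across /
				i++
			} else {
				out += "[^/]*" // * stops at /
			}
		case '?':
			out += "[^/]" // ? matches any character except /
		default:
			out += regexp.QuoteMeta(string(glob[i]))
		}
	}
	return regexp.MustCompile("^" + out + "$")
}

func main() {
	re := globToRegexp("dir/**")
	fmt.Println(re.String())                    // ^dir/.*$
	fmt.Println(re.MatchString("dir/file.jpg")) // true
	fmt.Println(re.MatchString("adir/file.jpg")) // false
}
```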
## Quoting shell metacharacters ##
The examples above may not work verbatim in your shell as they have
shell metacharacters in them (eg `*`), and may require quoting.
Eg linux, OSX
* `--include \*.jpg`
* `--include '*.jpg'`
* `--include='*.jpg'`
In Windows the expansion is done by the command, not the shell, so this
should work fine
* `--include *.jpg`

View File

@@ -1,7 +1,7 @@
---
title: "Google Cloud Storage"
description: "Rclone docs for Google Cloud Storage"
date: "2015-09-12"
date: "2014-07-17"
---
<i class="fa fa-google"></i> Google Cloud Storage
@@ -35,9 +35,9 @@ Choose a number from below
5) dropbox
6) drive
type> 4
Google Application Client Id - leave blank normally.
Google Application Client Id - leave blank to use rclone's.
client_id>
Google Application Client Secret - leave blank normally.
Google Application Client Secret - leave blank to use rclone's.
client_secret>
Project number optional - needed only for list/create/delete buckets - see your developer console.
project_number> 12345678
@@ -70,17 +70,10 @@ Choose a number from below, or type in your own value
5) publicReadWrite
bucket_acl> 2
Remote config
Remote config
Use auto config?
* Say Y if not sure
* Say N if you are working on a remote or headless machine or Y didn't work
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
Go to the following link in your browser
https://accounts.google.com/o/oauth2/auth?access_type=&approval_prompt=&client_id=XXXXXXXXXXXX.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&response_type=code&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdevstorage.full_control&state=state
Log in, then paste the token that is returned in the browser here
Enter verification code> x/xxxxxxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxxxxxxxxxx_xxxxxxxx
--------------------
[remote]
type = google cloud storage
@@ -97,13 +90,6 @@ d) Delete this remote
y/e/d> y
```
Note that rclone runs a webserver on your local machine to collect the
token as returned from Google if you use auto config mode. This only
runs from the moment it opens your browser to the moment you get back
the verification code. This is on `http://127.0.0.1:53682/` and you
may need to unblock it temporarily if you are running a host
firewall, or use manual mode instead.
This remote is called `remote` and can now be used like this
See all the buckets in your project
@@ -123,7 +109,8 @@ files in the bucket.
rclone sync /home/local/directory remote:bucket
### Modified time ###
Modified time
-------------
Google cloud storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in

View File

@@ -1,39 +0,0 @@
---
title: "Install"
description: "Rclone Installation"
date: "2015-06-12"
---
Install
-------
Rclone is a Go program and comes as a single binary file.
[Download](/downloads/) the relevant binary.
Or alternatively if you have Go installed use
go get github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`. If you have built
rclone before then you will want to update its dependencies first with
this (remove `-f` if using go < 1.4)
go get -u -v -f github.com/ncw/rclone/...
See the [Usage section](/docs/) of the docs for how to use rclone, or
run `rclone -h`.
Linux binary download and install example
-------
unzip rclone-v1.17-linux-amd64.zip
cd rclone-v1.17-linux-amd64
#copy binary file
sudo cp rclone /usr/sbin/
sudo chown root:root /usr/sbin/rclone
sudo chmod 755 /usr/sbin/rclone
#install manpage
sudo mkdir -p /usr/local/share/man/man1
sudo cp rclone.1 /usr/local/share/man/man1/
sudo mandb

View File

@@ -1,34 +0,0 @@
---
title: "Licence"
description: "Rclone Licence"
date: "2015-06-06"
---
License
-------
This is free software under the terms of the MIT license (check the
COPYING file included with the source code).
```
Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
```

View File

@@ -16,23 +16,10 @@ Will sync `/home/source` to `/tmp/destination`
These can be configured in the config file for consistency's sake,
but it is probably easier not to.
### Modified time ###
Modified time
-------------
Rclone reads and writes the modified time using an accuracy determined by
the OS. Typically this is 1 ns on Linux, 10 ns on Windows and 1 second
on OS X.
### Filenames ###
Filenames are expected to be encoded in UTF-8 on disk. This is the
normal case for Windows and OS X. There is a bit more uncertainty in
the Linux world, but new distributions will have UTF-8 encoded file
names.
If an invalid (non-UTF8) filename is read, the invalid characters will
be replaced with the unicode replacement character, '�'. `rclone`
will emit a debug message in this case (use `-v` to see), eg
```
Local file system at .: Replacing invalid UTF-8 characters in "gro\xdf"
```
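The replacement behaviour can be reproduced with a one-line round trip through `[]rune`; this is a sketch of the effect, not rclone's code:

```
package main

import "fmt"

func main() {
	// Converting string -> []rune -> string replaces each invalid
	// UTF-8 byte with the unicode replacement character U+FFFD.
	fmt.Printf("%q\n", string([]rune("gro\xdf"))) // "gro�"
}
```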

View File

@@ -1,71 +0,0 @@
---
title: "Overview of cloud storage systems"
description: "Overview of cloud storage systems"
type: page
date: "2015-09-06"
---
# Overview of cloud storage systems #
Each cloud storage system is slightly different. Rclone attempts to
provide a unified interface to them, but some underlying differences
show through.
## Features ##
Here is an overview of the major features of each cloud storage system.
| Name | MD5SUM | ModTime | Case Sensitive | Duplicate Files |
| ---------------------- |:-------:|:-------:|:--------------:|:---------------:|
| Google Drive | Yes | Yes | No | Yes |
| Amazon S3 | Yes | Yes | No | No |
| Openstack Swift | Yes | Yes | No | No |
| Dropbox | No | No | Yes | No |
| Google Cloud Storage | Yes | Yes | No | No |
| Amazon Cloud Drive | Yes | No | Yes | No |
| The local filesystem | Yes | Yes | Depends | No |
### MD5SUM ###
The cloud storage system supports MD5SUMs of the objects. This
is used if available when transferring data as an integrity check and
can be specifically used with the `--checksum` flag in syncs and in
the `check` command.
### ModTime ###
The cloud storage system supports setting modification times on
objects. If it does then this enables using the modification times
as part of the sync. If not then only the size will be checked by
default, though the MD5SUM can be checked with the `--checksum` flag.
All cloud storage systems support some kind of date on the object and
these will be set when transferring from the cloud storage system.
### Case Sensitive ###
If a cloud storage system is case sensitive then it is possible to
have two files which differ only in case, eg `file.txt` and
`FILE.txt`. If a cloud storage system is case insensitive then that
isn't possible.
This can cause problems when syncing between a case insensitive
system and a case sensitive system. The symptom of this is that no
matter how many times you run the sync it never completes fully.
The local filesystem may or may not be case sensitive depending on OS.
* Windows - usually case insensitive
* OSX - usually case insensitive, though it is possible to format case sensitive
* Linux - usually case sensitive, but there are case insensitive file systems (eg FAT formatted USB keys)
Most of the time this doesn't cause any problems as people tend to
avoid files whose name differs only by case even on case sensitive
systems.
### Duplicate files ###
If a cloud storage system allows duplicate files then it can have two
objects with the same name.
This confuses rclone greatly when syncing.

View File

@@ -1,65 +0,0 @@
---
title: "Privacy Policy"
description: "Rclone Privacy Policy"
date: "2015-08-19"
---
# Rclone Privacy Policy #
## What is this Privacy Policy for? ##
This privacy policy is for this website http://rclone.org and governs the privacy of its users who choose to use it.
The policy sets out the different areas where user privacy is concerned and outlines the obligations & requirements of the users, the website and website owners. Furthermore the way this website processes, stores and protects user data and information will also be detailed within this policy.
## The Website ##
This website and its owners take a proactive approach to user privacy and ensure the necessary steps are taken to protect the privacy of its users throughout their visiting experience. This website complies with all UK national laws and requirements for user privacy.
## Use of Cookies ##
This website uses cookies to improve the user's experience while visiting the website. Where applicable this website uses a cookie control system allowing the user on their first visit to the website to allow or disallow the use of cookies on their computer / device. This complies with recent legislation requirements for websites to obtain explicit consent from users before leaving behind or reading files such as cookies on a user's computer / device.
Cookies are small files saved to the hard drive of the user's computer that track, save and store information about the user's interactions and usage of the website. This allows the website, through its server, to provide the users with a tailored experience within this website.
Users are advised that if they wish to deny the use and saving of cookies from this website onto their computer's hard drive they should take the necessary steps within their web browser's security settings to block all cookies from this website and its external serving vendors.
This website uses tracking software to monitor its visitors to better understand how they use it. This software is provided by Google Analytics which uses cookies to track visitor usage. The software will save a cookie to your computer's hard drive in order to track and monitor your engagement and usage of the website, but will not store, save or collect personal information. You can read [Google's privacy policy here](http://www.google.com/privacy.html) for further information.
Other cookies may be stored on your computer's hard drive by external vendors when this website uses referral programs, sponsored links or adverts. Such cookies are used for conversion and referral tracking and typically expire after 30 days, though some may take longer. No personal information is stored, saved or collected.
## Contact & Communication ##
Users contacting this website and/or its owners do so at their own discretion and provide any such personal details requested at their own risk. Your personal information is kept private and stored securely until a time it is no longer required or has no use, as detailed in the Data Protection Act 1998.
This website and its owners use any information submitted to provide you with further information about the products / services they offer or to assist you in answering any questions or queries you may have submitted.
## External Links ##
Although this website only looks to include quality, safe and relevant external links, users are advised to adopt a policy of caution before clicking any external web links mentioned throughout this website.
The owners of this website cannot guarantee or verify the contents of any externally linked website despite their best efforts. Users should therefore note they click on external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.
## Adverts and Sponsored Links ##
This website may contain sponsored links and adverts. These will typically be served through our advertising partners, who may have detailed privacy policies relating directly to the adverts they serve.
Clicking on any such adverts will send you to the advertiser's website through a referral program which may use cookies and will track the number of referrals sent from this website. This may include the use of cookies which may in turn be saved on your computer's hard drive. Users should therefore note they click on sponsored external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.
## Social Media Platforms ##
Communication, engagement and actions taken through external social media platforms that this website and its owners participate on are subject to the terms and conditions as well as the privacy policies held with each social media platform respectively.
Users are advised to use social media platforms wisely and communicate / engage upon them with due care and caution in regard to their own privacy and personal details. Neither this website nor its owners will ever ask for personal or sensitive information through social media platforms; users wishing to discuss sensitive details are encouraged to contact them through primary communication channels such as email.
This website may use social sharing buttons which help share web content directly from web pages to the social media platform in question. Users are advised before using such social sharing buttons that they do so at their own discretion and note that the social media platform may track and save your request to share a web page respectively through your social media platform account.
## Resources & Further Information ##
* [Data Protection Act 1998](http://www.legislation.gov.uk/ukpga/1998/29/contents)
* [Privacy and Electronic Communications Regulations 2003](http://www.legislation.gov.uk/uksi/2003/2426/contents/made)
* [Privacy and Electronic Communications Regulations 2003 - The Guide](https://ico.org.uk/for-organisations/guide-to-pecr/)
* [Twitter Privacy Policy](http://twitter.com/privacy)
* [Facebook Privacy Policy](http://www.facebook.com/about/privacy/)
* [Google Privacy Policy](http://www.google.com/privacy.html)
* [Sample Website Privacy Policy](http://www.jamieking.co.uk/resources/free_sample_privacy_policy.html)

View File

@@ -27,55 +27,41 @@ Choose a number from below
1) swift
2) s3
3) local
4) google cloud storage
5) dropbox
6) drive
4) drive
type> 2
AWS Access Key ID.
access_key_id> accesskey
AWS Secret Access Key (password).
secret_access_key> secretaccesskey
Region to connect to.
Endpoint for S3 API.
Choose a number from below, or type in your own value
* The default endpoint - a good choice if you are unsure.
* US Region, Northern Virginia or Pacific Northwest.
* Leave location constraint empty.
1) us-east-1
* US West (Oregon) Region
* Needs location constraint us-west-2.
2) us-west-2
1) https://s3.amazonaws.com/
* US Region, Northern Virginia only.
* Leave location constraint empty.
2) https://s3-external-1.amazonaws.com
[snip]
* South America (Sao Paulo) Region
* Needs location constraint sa-east-1.
9) sa-east-1
* If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.
10) other-v2-signature
* If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.
11) other-v4-signature
region> 1
Endpoint for S3 API.
Leave blank if using AWS to use the default endpoint for the region.
Specify if using an S3 clone such as Ceph.
endpoint>
Location constraint - must be set to match the Region. Used when creating buckets only.
9) https://s3-sa-east-1.amazonaws.com
endpoint> 1
Location constraint - must be set to match the Endpoint.
Choose a number from below, or type in your own value
* Empty for US Region, Northern Virginia or Pacific Northwest.
1)
* US West (Oregon) Region.
2) us-west-2
* US West (Northern California) Region.
3) us-west-1
* EU (Ireland) Region.
4) eu-west-1
[snip]
* South America (Sao Paulo) Region.
9) sa-east-1
location_constraint> 1
Remote config
--------------------
[remote]
access_key_id = accesskey
secret_access_key = secretaccesskey
region = us-east-1
endpoint =
endpoint = https://s3.amazonaws.com/
location_constraint =
--------------------
y) Yes this is OK
@@ -114,98 +100,8 @@ files in the bucket.
rclone sync /home/local/directory remote:bucket
### Modified time ###
Modified time
-------------
The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch accurate to 1 ns.
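A sketch of producing a value of that shape, illustrative only and not rclone's code:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	// Render a modification time as seconds since the epoch with a
	// 9 digit fraction, the shape stored in X-Amz-Meta-Mtime.
	t := time.Date(2014, 7, 19, 13, 12, 20, 123456789, time.UTC)
	meta := fmt.Sprintf("%d.%09d", t.Unix(), t.Nanosecond())
	fmt.Println("X-Amz-Meta-Mtime:", meta) // 1405775540.123456789
}
```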
### Multipart uploads ###
rclone supports multipart uploads with S3 which means that it can
upload files bigger than 5GB. Note that files uploaded with multipart
upload don't have an MD5SUM.
### Buckets and Regions ###
With Amazon S3 you can list buckets (`rclone lsd`) using any region,
but you can only access the content of a bucket from the region it was
created in. If you attempt to access a bucket from the wrong region,
you will get an error, `incorrect region, the bucket is not in 'XXX'
region`.
### Anonymous access to public buckets ###
If you want to use rclone to access a public bucket, configure with a
blank `access_key_id` and `secret_access_key`. Eg
```
e) Edit existing remote
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> anons3
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) drive
3) dropbox
4) google cloud storage
5) local
6) s3
7) swift
type> 6
AWS Access Key ID - leave blank for anonymous access.
access_key_id>
AWS Secret Access Key (password) - leave blank for anonymous access.
secret_access_key>
Region to connect to.
region> 1
endpoint>
location_constraint>
```
Then use it as normal with the name of the public bucket, eg
rclone lsd anons3:1000genomes
You will be able to list and copy data but not upload it.
### Ceph ###
Ceph is an object storage system which presents an Amazon S3 interface.
To use rclone with Ceph, you need to set the following parameters in
the config.
```
access_key_id = Whatever
secret_access_key = Whatever
endpoint = https://ceph.endpoint.goes.here/
region = other-v2-signature
```
Note also that Ceph sometimes puts `/` in the passwords it gives
users. If you read the secret access key using the command line tools
you will get a JSON blob with the `/` escaped as `\/`. Make sure you
only write `/` in the secret access key.
Eg the dump from Ceph looks something like this (irrelevant keys
removed).
```
{
"user_id": "xxx",
"display_name": "xxxx",
"keys": [
{
"user": "xxx",
"access_key": "xxxxxx",
"secret_key": "xxxxxx\/xxxx"
}
],
}
```
Because this is a JSON dump, it encodes the `/` as `\/`, so if you
use the secret key as `xxxxxx/xxxx` it will work fine.
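A quick way to convince yourself: Go's `encoding/json` decodes the escaped solidus back to a plain `/`.

```
package main

import (
	"encoding/json"
	"fmt"
)

// Check that encoding/json turns the escaped \/ back into a plain /
// when decoding, so the unescaped form is the one to use.
func main() {
	blob := []byte(`{"secret_key": "xxxxxx\/xxxx"}`)
	var v struct {
		SecretKey string `json:"secret_key"`
	}
	if err := json.Unmarshal(blob, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.SecretKey) // xxxxxx/xxxx
}
```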

View File

@@ -52,15 +52,12 @@ Choose a number from below, or type in your own value
* Memset Memstore UK v2
5) https://auth.storage.memset.com/v2.0
auth> 1
Tenant name - optional
tenant>
Remote config
--------------------
[remote]
user = user_name
key = password_or_api_key
auth = https://auth.api.rackspacecloud.com/v1.0
tenant =
--------------------
y) Yes this is OK
e) Edit this remote
@@ -87,7 +84,8 @@ excess files in the container.
rclone sync /home/local/directory remote:container
### Modified time ###
Modified time
-------------
The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point since the epoch accurate to 1

View File

@@ -3,7 +3,6 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="{{ .Description }}">
<meta name="author" content="Nick Craig-Wood">
<link rel="shortcut icon" type="image/png" href="/img/rclone-16x16.png"/>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),

View File

@@ -12,35 +12,19 @@
<div class="collapse navbar-collapse navbar-ex1-collapse">
<ul class="nav navbar-nav">
<li><a href="/downloads/"><i class="fa fa-cloud-download"></i> Downloads</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Docs</a>
<ul class="dropdown-menu">
<li><a href="/install/"><i class="fa fa-book"></i> Installation</a></li>
<li><a href="/docs/"><i class="fa fa-book"></i> Usage</a></li>
<li><a href="/filtering/"><i class="fa fa-book"></i> Filtering</a></li>
<li><a href="/changelog/"><i class="fa fa-book"></i> Changelog</a></li>
<li><a href="/bugs/"><i class="fa fa-book"></i> Bugs</a></li>
<li><a href="/faq/"><i class="fa fa-book"></i> FAQ</a></li>
<li><a href="/licence/"><i class="fa fa-book"></i> Licence</a></li>
<li><a href="/authors/"><i class="fa fa-book"></i> Authors</a></li>
<li><a href="/donate/"><i class="fa fa-book"></i> Donate</a></li>
<li><a href="/privacy/"><i class="fa fa-book"></i> Privacy Policy</a></li>
</ul>
</li>
<li><a href="/docs/"><i class="fa fa-book"></i> Docs</a></li>
<li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Storage Systems</a>
<ul class="dropdown-menu">
<li><a href="/overview/"><i class="fa fa-archive"></i> Overview</a></li>
<li><a href="/drive/"><i class="fa fa-google"></i> Drive</a></li>
<li><a href="/s3/"><i class="fa fa-archive"></i> S3</a></li>
<li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Swift</a></li>
<li><a href="/dropbox/"><i class="fa fa-dropbox"></i> Dropbox</a></li>
<li><a href="/googlecloudstorage/"><i class="fa fa-google"></i> Google Cloud Storage</a></li>
<li><a href="/amazonclouddrive/"><i class="fa fa-archive"></i> Amazon Cloud Drive</a></li>
<li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
</ul>
</li>
<li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
</ul>
</div>
</div>

View File

@@ -4,23 +4,4 @@ body {
footer {
margin: 50px 0;
}
table {
background-color:#e0e0ff
}
tbody td, th {
border: 1px solid black;
padding: 3px 7px 2px 7px;
}
thead td, th {
border: 1px solid black;
padding: 3px 7px 2px 7px;
font-weight: bold;
}
tbody tr:nth-child(odd) {
background-color:#d0d0ff
}
}

Binary file not shown.


File diff suppressed because it is too large

View File

@@ -1,56 +0,0 @@
// Test Drive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package drive_test
import (
"testing"
"github.com/ncw/rclone/drive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func init() {
fstests.NilObject = fs.Object((*drive.FsObjectDrive)(nil))
fstests.RemoteName = "TestDrive:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

View File

@@ -1,247 +0,0 @@
// Upload for drive
//
// Docs
// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
// Files insert: https://developers.google.com/drive/v2/reference/files/insert
// Files update: https://developers.google.com/drive/v2/reference/files/update
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
package drive
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/ncw/rclone/fs"
"google.golang.org/api/drive/v2"
"google.golang.org/api/googleapi"
)
const (
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
statusResumeIncomplete = 308
// Number of times to try each chunk
maxTries = 10
)
// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
f *FsDrive
remote string
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
URI string
// Media is the object being uploaded.
Media io.Reader
// MediaType defines the media type, e.g. "image/jpeg".
MediaType string
// ContentLength is the full size of the object being uploaded.
ContentLength int64
// Return value
ret *drive.File
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) {
fileID := info.Id
var body io.Reader
body, err := googleapi.WithoutDataWrapper.JSONReader(info)
if err != nil {
return nil, err
}
params := make(url.Values)
params.Set("alt", "json")
params.Set("uploadType", "resumable")
urls := "https://www.googleapis.com/upload/drive/v2/files"
method := "POST"
if fileID != "" {
params.Set("setModifiedDate", "true")
urls += "/{fileId}"
method = "PUT"
}
urls += "?" + params.Encode()
req, _ := http.NewRequest(method, urls, body)
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
req.Header.Set("X-Upload-Content-Type", contentType)
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
req.Header.Set("User-Agent", fs.UserAgent)
var res *http.Response
err = f.pacer.Call(func() (bool, error) {
res, err = f.client.Do(req)
if err == nil {
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return shouldRetry(err)
})
if err != nil {
return nil, err
}
loc := res.Header.Get("Location")
rx := &resumableUpload{
f: f,
remote: remote,
URI: loc,
Media: in,
MediaType: contentType,
ContentLength: size,
}
return rx.Upload()
}
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(start int64, body []byte) *http.Request {
reqSize := int64(len(body))
req, _ := http.NewRequest("POST", rx.URI, bytes.NewBuffer(body))
req.ContentLength = reqSize
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
} else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
}
req.Header.Set("Content-Type", rx.MediaType)
req.Header.Set("User-Agent", fs.UserAgent)
return req
}
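For illustration (sizes hypothetical): an 8 MiB chunk at offset 0 of a
104857600 byte upload is sent with "Content-Range: bytes 0-8388607/104857600",
while a zero-length status probe (body == nil) sends
"Content-Range: bytes */104857600".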
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
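As a worked example: a status response whose Range header value is "0-12345"
matches rangeRE with m[1] == "12345", so transferStatus below returns
start = 12345.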
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, fmt.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, fmt.Errorf("unable to parse range %q", Range)
}
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, body []byte) (int, error) {
req := rx.makeRequest(start, body)
res, err := rx.f.client.Do(req)
if err != nil {
return 599, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == statusResumeIncomplete {
return res.StatusCode, nil
}
err = googleapi.CheckResponse(res)
if err != nil {
return res.StatusCode, err
}
// When the entire file upload is complete, the server
// responds with an HTTP 201 Created along with any metadata
// associated with this resource. If this request had been
// updating an existing entity rather than creating a new one,
// the HTTP response code for a completed upload would have
// been 200 OK.
//
// So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionally on
// StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err
}
return res.StatusCode, nil
}
// Upload uploads the chunks from the input
// It retries each chunk maxTries times (with a pause of uploadPause between attempts).
func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
buf := make([]byte, chunkSize)
var StatusCode int
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
} else {
buf = buf[:reqSize]
}
// Read the chunk
_, err := io.ReadFull(rx.Media, buf)
if err != nil {
return nil, err
}
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debug(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(start, buf)
again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil
}
return again, err
})
if err != nil {
return nil, err
}
start += reqSize
}
// Resume or retry uploads that fail due to connection interruptions or
// any 5xx errors, including:
//
// 500 Internal Server Error
// 502 Bad Gateway
// 503 Service Unavailable
// 504 Gateway Timeout
//
// Use an exponential backoff strategy if any 5xx server error is
// returned when resuming or retrying upload requests. These errors can
// occur if a server is getting overloaded. Exponential backoff can help
// alleviate these kinds of problems during periods of high volume of
// requests or heavy network traffic. Other kinds of requests should not
// be handled by exponential backoff but you can still retry a number of
// them. When retrying these requests, limit the number of times you
// retry them. For example your code could limit to ten retries or less
// before reporting an error.
//
// Handle 404 Not Found errors when doing resumable uploads by starting
// the entire upload over from the beginning.
if rx.ret == nil {
return nil, fs.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
}
return rx.ret, nil
}
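The backoff policy described in the comment above is handled by the pacer;
purely as an illustrative sketch of the recommended strategy (not rclone's
pacer - withBackoff and retryable are hypothetical names, and "time" is
assumed imported):
// withBackoff retries try with exponentially increasing pauses, as
// recommended for 5xx responses from the upload endpoint.
func withBackoff(try func() error, retryable func(error) bool) error {
const maxRetries = 10
sleep := 500 * time.Millisecond
var err error
for i := 0; i < maxRetries; i++ {
err = try()
if err == nil || !retryable(err) {
return err
}
time.Sleep(sleep)
sleep *= 2 // double the pause before the next attempt
}
return err
}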


@@ -1,10 +1,22 @@
// Package dropbox provides an interface to Dropbox object storage
// Dropbox interface
package dropbox
/*
Limitations of dropbox
File system is case insensitive
The datastore is limited to 100,000 records which therefore is the
limit of the number of files that rclone can use on dropbox.
FIXME only open datastore if we need it?
FIXME Getting this sometimes
Failed to copy: Upload failed: invalid character '<' looking for beginning of value
This is a JSON decode error - from Update / UploadByChunk
- Caused by 500 error from dropbox
- See https://github.com/stacktic/dropbox/issues/1
- Possibly confusing dropbox with excess concurrency?
*/
import (
@@ -12,55 +24,49 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/spf13/pflag"
"github.com/stacktic/dropbox"
)
// Constants
const (
rcloneAppKey = "5jcck7diasz0rqy"
rcloneAppSecret = "m8WRxJ6b1Z/Y25fDwJWS"
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
)
var (
// A regexp matching path names for files Dropbox ignores
// See https://www.dropbox.com/en/help/145 - Ignored files
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
// Upload chunk size - setting too small makes uploads slow.
// Chunks aren't buffered into memory though, so it can be set large.
uploadChunkSize = fs.SizeSuffix(128 * 1024 * 1024)
maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
rcloneAppKey = "5jcck7diasz0rqy"
rcloneAppSecret = "1n9m04y2zx7bf26"
uploadChunkSize = 64 * 1024 // chunk size for upload
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
datastoreName = "rclone"
tableName = "metadata"
md5sumField = "md5sum"
mtimeField = "mtime"
maxCommitRetries = 5
RFC3339In = time.RFC3339
RFC3339Out = "2006-01-02T15:04:05.000000000Z07:00"
)
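For example, a UTC modification time of 13:12:20.5 is written with fixed
nanosecond padding as "2014-07-19T13:12:20.500000000Z" (RFC3339Out), while
time.Parse with RFC3339In accepts standard RFC 3339 timestamps, with or
without a fractional second, when reading it back.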
// Register with Fs
func init() {
fs.Register(&fs.Info{
fs.Register(&fs.FsInfo{
Name: "dropbox",
NewFs: NewFs,
Config: configHelper,
Config: Config,
Options: []fs.Option{{
Name: "app_key",
Help: "Dropbox App Key - leave blank normally.",
Help: "Dropbox App Key - leave blank to use rclone's.",
}, {
Name: "app_secret",
Help: "Dropbox App Secret - leave blank normally.",
Help: "Dropbox App Secret - leave blank to use rclone's.",
}},
})
pflag.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}
// Configuration helper - called after the user has put in the defaults
func configHelper(name string) {
func Config(name string) {
// See if already have a token
token := fs.ConfigFile.MustValue(name, "token")
if token != "" {
@@ -71,10 +77,7 @@ func configHelper(name string) {
}
// Get a dropbox
db, err := newDropbox(name)
if err != nil {
log.Fatalf("Failed to create dropbox client: %v", err)
}
db := newDropbox(name)
// This method will ask the user to visit a URL and paste the generated code.
if err := db.Auth(); err != nil {
@@ -94,41 +97,35 @@ func configHelper(name string) {
// FsDropbox represents a remote dropbox server
type FsDropbox struct {
name string // name of this remote
db *dropbox.Dropbox // the connection to the dropbox server
root string // the path we are working on
slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase
db *dropbox.Dropbox // the connection to the dropbox server
root string // the path we are working on
slashRoot string // root with "/" prefix
slashRootSlash string // root with "/" prefix and postfix
datastoreManager *dropbox.DatastoreManager
datastore *dropbox.Datastore
table *dropbox.Table
datastoreMutex sync.Mutex // lock this when using the datastore
datastoreErr error // pending errors on the datastore
}
// FsObjectDropbox describes a dropbox object
type FsObjectDropbox struct {
dropbox *FsDropbox // what this object is part of
remote string // The remote path
bytes int64 // size of the object
modTime time.Time // time it was last modified
hasMetadata bool // metadata is valid
dropbox *FsDropbox // what this object is part of
remote string // The remote path
md5sum string // md5sum of the object
bytes int64 // size of the object
modTime time.Time // time it was last modified
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *FsDropbox) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *FsDropbox) Root() string {
return f.root
}
// String converts this FsDropbox to a string
func (f *FsDropbox) String() string {
return fmt.Sprintf("Dropbox root '%s'", f.root)
}
// Makes a new dropbox from the config
func newDropbox(name string) (*dropbox.Dropbox, error) {
func newDropbox(name string) *dropbox.Dropbox {
db := dropbox.NewDropbox()
appKey := fs.ConfigFile.MustValue(name, "app_key")
@@ -137,37 +134,34 @@ func newDropbox(name string) (*dropbox.Dropbox, error) {
}
appSecret := fs.ConfigFile.MustValue(name, "app_secret")
if appSecret == "" {
appSecret = fs.Reveal(rcloneAppSecret)
appSecret = rcloneAppSecret
}
err := db.SetAppInfo(appKey, appSecret)
return db, err
db.SetAppInfo(appKey, appSecret)
return db
}
// NewFs constructs an FsDropbox from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
if uploadChunkSize > maxUploadChunkSize {
return nil, fmt.Errorf("Chunk size too big, must be < %v", maxUploadChunkSize)
}
db, err := newDropbox(name)
if err != nil {
return nil, err
}
db := newDropbox(name)
f := &FsDropbox{
name: name,
db: db,
db: db,
}
f.setRoot(root)
// Read the token from the config file
token := fs.ConfigFile.MustValue(name, "token")
// Set our custom context which enables our custom transport for timeouts etc
db.SetContext(oauthutil.Context())
// Authorize the client
db.SetAccessToken(token)
// Make a db to store rclone metadata in
f.datastoreManager = db.NewDatastoreManager()
// Open the datastore in the background
go f.openDataStore()
// See if the root is actually an object
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
if err == nil && !entry.IsDir {
@@ -188,19 +182,39 @@ func NewFs(name, root string) (fs.Fs, error) {
// Sets root in f
func (f *FsDropbox) setRoot(root string) {
f.root = strings.Trim(root, "/")
lowerCaseRoot := strings.ToLower(f.root)
f.slashRoot = "/" + lowerCaseRoot
f.slashRoot = "/" + f.root
f.slashRootSlash = f.slashRoot
if lowerCaseRoot != "" {
if f.root != "" {
f.slashRootSlash += "/"
}
}
// Opens the datastore in f
func (f *FsDropbox) openDataStore() {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
fs.Debug(f, "Open rclone datastore")
// Open the rclone datastore
var err error
f.datastore, err = f.datastoreManager.OpenDatastore(datastoreName)
if err != nil {
fs.Log(f, "Failed to open datastore: %v", err)
f.datastoreErr = err
return
}
// Get the table we are using
f.table, err = f.datastore.GetTable(tableName)
if err != nil {
fs.Log(f, "Failed to open datastore table: %v", err)
f.datastoreErr = err
return
}
fs.Debug(f, "Open rclone datastore finished")
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) (fs.Object, error) {
o := &FsObjectDropbox{
dropbox: f,
remote: remote,
@@ -211,49 +225,49 @@ func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.O
err := o.readEntryAndSetMetadata()
if err != nil {
// logged already fs.Debug("Failed to read info: %s", err)
return nil
return nil, err
}
}
return o
return o, nil
}
// NewFsObject returns an FsObject from a path
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) NewFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
fs, _ := f.newFsObjectWithInfo(remote, info)
// Errors have already been logged
return fs
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
return f.NewFsObjectWithInfo(remote, nil)
}
// Strips the root off path and returns it
func (f *FsDropbox) stripRoot(path string) *string {
lowercase := strings.ToLower(path)
if !strings.HasPrefix(lowercase, f.slashRootSlash) {
fs.Stats.Error()
fs.ErrorLog(f, "Path '%s' is not under root '%s'", path, f.slashRootSlash)
return nil
// Strips the root off entry and returns it
func (f *FsDropbox) stripRoot(entry *dropbox.Entry) string {
path := entry.Path
if strings.HasPrefix(path, f.slashRootSlash) {
path = path[len(f.slashRootSlash):]
}
stripped := path[len(f.slashRootSlash):]
return &stripped
return path
}
// Walk the root returning a channel of FsObjects
func (f *FsDropbox) list(out fs.ObjectsChan) {
// Track path component case, it could be different for entries coming from DropBox API
// See https://www.dropboxforum.com/hc/communities/public/questions/201665409-Wrong-character-case-of-folder-name-when-calling-listFolder-using-Sync-API?locale=en-us
// and https://github.com/ncw/rclone/issues/53
nameTree := newNameTree()
cursor := ""
for {
deltaPage, err := f.db.Delta(cursor, f.slashRoot)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list: %s", err)
fs.Log(f, "Couldn't list: %s", err)
break
} else {
if deltaPage.Reset && cursor != "" {
fs.ErrorLog(f, "Unexpected reset during listing - try again")
fs.Log(f, "Unexpected reset during listing - try again")
fs.Stats.Error()
break
}
@@ -263,61 +277,32 @@ func (f *FsDropbox) list(out fs.ObjectsChan) {
entry := deltaEntry.Entry
if entry == nil {
// This notifies of a deleted object
fs.Debug(f, "Deleting metadata for %q", deltaEntry.Path)
key := metadataKey(deltaEntry.Path) // Path is lowercased
err := f.deleteMetadata(key)
if err != nil {
fs.Debug(f, "Failed to delete metadata for %q", deltaEntry.Path)
// Don't accumulate Error here
}
} else {
if len(entry.Path) <= 1 || entry.Path[0] != '/' {
fs.Stats.Error()
fs.ErrorLog(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
continue
}
lastSlashIndex := strings.LastIndex(entry.Path, "/")
var parentPath string
if lastSlashIndex == 0 {
parentPath = ""
} else {
parentPath = entry.Path[1:lastSlashIndex]
}
lastComponent := entry.Path[lastSlashIndex+1:]
if entry.IsDir {
nameTree.PutCaseCorrectDirectoryName(parentPath, lastComponent)
// ignore directories
} else {
parentPathCorrectCase := nameTree.GetPathWithCorrectCase(parentPath)
if parentPathCorrectCase != nil {
path := f.stripRoot(*parentPathCorrectCase + "/" + lastComponent)
if path == nil {
// an error occurred and logged by stripRoot
continue
}
out <- f.newFsObjectWithInfo(*path, entry)
} else {
nameTree.PutFile(parentPath, lastComponent, entry)
}
path := f.stripRoot(entry)
out <- f.NewFsObjectWithInfo(path, entry)
}
}
}
if !deltaPage.HasMore {
break
}
cursor = deltaPage.Cursor.Cursor
cursor = deltaPage.Cursor
}
}
walkFunc := func(caseCorrectFilePath string, entry *dropbox.Entry) {
path := f.stripRoot("/" + caseCorrectFilePath)
if path == nil {
// an error occurred and logged by stripRoot
return
}
out <- f.newFsObjectWithInfo(*path, entry)
}
nameTree.WalkFiles(f.root, walkFunc)
}
// List walks the path returning a channel of FsObjects
// Walk the path returning a channel of FsObjects
func (f *FsDropbox) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
@@ -327,7 +312,7 @@ func (f *FsDropbox) List() fs.ObjectsChan {
return out
}
// ListDir walks the path returning a channel of FsObjects
// Walk the path returning a channel of FsObjects
func (f *FsDropbox) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
@@ -335,21 +320,15 @@ func (f *FsDropbox) ListDir() fs.DirChan {
entry, err := f.db.Metadata(f.root, true, false, "", "", metadataLimit)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list directories in root: %s", err)
fs.Log(f, "Couldn't list directories in root: %s", err)
} else {
for i := range entry.Contents {
entry := &entry.Contents[i]
if entry.IsDir {
name := f.stripRoot(entry.Path)
if name == nil {
// an error occurred and logged by stripRoot
continue
}
out <- &fs.Dir{
Name: *name,
Name: f.stripRoot(entry),
When: time.Time(entry.ClientMtime),
Bytes: entry.Bytes,
Bytes: int64(entry.Bytes),
Count: -1,
}
}
@@ -412,38 +391,9 @@ func (f *FsDropbox) Rmdir() error {
return f.Purge()
}
// Precision returns the precision
func (f *FsDropbox) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsDropbox) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*FsObjectDropbox)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary FsObject under construction
dstObj := &FsObjectDropbox{dropbox: f, remote: remote}
srcPath := srcObj.remotePath()
dstPath := dstObj.remotePath()
entry, err := f.db.Copy(srcPath, dstPath, false)
if err != nil {
return nil, fmt.Errorf("Copy failed: %s", err)
}
dstObj.setMetadataFromEntry(entry)
return dstObj, nil
// Return the precision
func (fs *FsDropbox) Precision() time.Duration {
return time.Nanosecond
}
// Purge deletes all the files and the container
@@ -452,71 +402,84 @@ func (f *FsDropbox) Copy(src fs.Object, remote string) (fs.Object, error) {
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsDropbox) Purge() error {
// Delete metadata first
var wg sync.WaitGroup
to_be_deleted := f.List()
wg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
go func() {
defer wg.Done()
for dst := range to_be_deleted {
o := dst.(*FsObjectDropbox)
o.deleteMetadata()
}
}()
}
wg.Wait()
// Let dropbox delete the filesystem tree
_, err := f.db.Delete(f.slashRoot)
return err
}
// Move src to this remote using server side move operations.
// Tries the transaction in fn then calls commit, repeating until retry limit
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *FsDropbox) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*FsObjectDropbox)
if !ok {
fs.Debug(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
// Holds datastore mutex while in progress
func (f *FsDropbox) transaction(fn func() error) error {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
if f.datastoreErr != nil {
return f.datastoreErr
}
var err error
for i := 1; i <= maxCommitRetries; i++ {
err = fn()
if err != nil {
return err
}
// Temporary FsObject under construction
dstObj := &FsObjectDropbox{dropbox: f, remote: remote}
srcPath := srcObj.remotePath()
dstPath := dstObj.remotePath()
entry, err := f.db.Move(srcPath, dstPath)
err = f.datastore.Commit()
if err == nil {
break
}
fs.Debug(f, "Retrying transaction %d/%d", i, maxCommitRetries)
}
if err != nil {
return nil, fmt.Errorf("Move failed: %s", err)
}
dstObj.setMetadataFromEntry(entry)
return dstObj, nil
}
// DirMove moves src to this remote using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *FsDropbox) DirMove(src fs.Fs) error {
srcFs, ok := src.(*FsDropbox)
if !ok {
fs.Debug(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
// Check if destination exists
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
if err == nil && !entry.IsDeleted {
return fs.ErrorDirExists
}
// Do the move
_, err = f.db.Move(srcFs.slashRoot, f.slashRoot)
if err != nil {
return fmt.Errorf("MoveDir failed: %v", err)
return fmt.Errorf("Failed to commit metadata changes: %s", err)
}
return nil
}
// Deletes the metadata associated with this key
func (f *FsDropbox) deleteMetadata(key string) error {
return f.transaction(func() error {
record, err := f.table.Get(key)
if err != nil {
return fmt.Errorf("Couldn't get record: %s", err)
}
if record == nil {
return nil
}
record.DeleteRecord()
return nil
})
}
// Reads the record attached to key
//
// Holds datastore mutex while in progress
func (f *FsDropbox) readRecord(key string) (*dropbox.Record, error) {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
if f.datastoreErr != nil {
return nil, f.datastoreErr
}
return f.table.Get(key)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
// Return the parent Fs
func (o *FsObjectDropbox) Fs() fs.Fs {
return o.dropbox
}
@@ -529,14 +492,39 @@ func (o *FsObjectDropbox) String() string {
return o.remote
}
// Remote returns the remote path
// Return the remote path
func (o *FsObjectDropbox) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
//
// FIXME has to download the file!
func (o *FsObjectDropbox) Md5sum() (string, error) {
return "", nil
if o.md5sum != "" {
return o.md5sum, nil
}
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return "", fmt.Errorf("Failed to read metadata: %s", err)
}
// For pre-existing files which have no md5sum we could read it and set it?
// in, err := o.Open()
// if err != nil {
// return "", err
// }
// defer in.Close()
// hash := md5.New()
// _, err = io.Copy(hash, in)
// if err != nil {
// return "", err
// }
// o.md5sum = fmt.Sprintf("%x", hash.Sum(nil))
return o.md5sum, nil
}
// Size returns the size of an object in bytes
@@ -548,9 +536,8 @@ func (o *FsObjectDropbox) Size() int64 {
//
// This isn't a complete set of metadata and has an inaccurate date
func (o *FsObjectDropbox) setMetadataFromEntry(info *dropbox.Entry) {
o.bytes = info.Bytes
o.bytes = int64(info.Bytes)
o.modTime = time.Time(info.ClientMtime)
o.hasMetadata = true
}
// Reads the entry from dropbox
@@ -586,9 +573,7 @@ func (o *FsObjectDropbox) remotePath() string {
func metadataKey(path string) string {
// NB File system is case insensitive
path = strings.ToLower(path)
hash := md5.New()
_, _ = hash.Write([]byte(path))
return fmt.Sprintf("%x", hash.Sum(nil))
return fmt.Sprintf("%x", md5.Sum([]byte(path)))
}
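For example, per the case-insensitivity note above, "/Photos/IMG.jpg" and
"/photos/img.jpg" produce the same key: both lowercase to "/photos/img.jpg"
before being hashed with md5.Sum.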
// Returns the key for the metadata database
@@ -598,11 +583,58 @@ func (o *FsObjectDropbox) metadataKey() string {
// readMetaData gets the info if it hasn't already been fetched
func (o *FsObjectDropbox) readMetaData() (err error) {
if o.hasMetadata {
if o.md5sum != "" {
return nil
}
// fs.Debug(o, "Reading metadata from datastore")
record, err := o.dropbox.readRecord(o.metadataKey())
if err != nil {
fs.Debug(o, "Couldn't read metadata: %s", err)
record = nil
}
if record != nil {
// Read md5sum
md5sumInterface, ok, err := record.Get(md5sumField)
if err != nil {
return err
}
if !ok {
fs.Debug(o, "Couldn't find md5sum in record")
} else {
md5sum, ok := md5sumInterface.(string)
if !ok {
fs.Debug(o, "md5sum not a string")
} else {
o.md5sum = md5sum
}
}
// read mtime
mtimeInterface, ok, err := record.Get(mtimeField)
if err != nil {
return err
}
if !ok {
fs.Debug(o, "Couldn't find mtime in record")
} else {
mtime, ok := mtimeInterface.(string)
if !ok {
fs.Debug(o, "mtime not a string")
} else {
modTime, err := time.Parse(RFC3339In, mtime)
if err != nil {
return err
}
o.modTime = modTime
}
}
}
// Last resort
return o.readEntryAndSetMetadata()
o.readEntryAndSetMetadata()
return nil
}
// ModTime returns the modification time of the object
@@ -618,15 +650,60 @@ func (o *FsObjectDropbox) ModTime() time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
// Sets the modification time of the local fs object into the record
// FIXME if we don't set md5sum what will that do?
func (o *FsObjectDropbox) setModTimeAndMd5sum(modTime time.Time, md5sum string) error {
key := o.metadataKey()
// fs.Debug(o, "Writing metadata to datastore")
return o.dropbox.transaction(func() error {
record, err := o.dropbox.table.GetOrInsert(key)
if err != nil {
return fmt.Errorf("Couldn't read record: %s", err)
}
if md5sum != "" {
err = record.Set(md5sumField, md5sum)
if err != nil {
return fmt.Errorf("Couldn't set md5sum record: %s", err)
}
}
if !modTime.IsZero() {
mtime := modTime.Format(RFC3339Out)
err := record.Set(mtimeField, mtime)
if err != nil {
return fmt.Errorf("Couldn't set mtime record: %s", err)
}
}
return nil
})
}
// Deletes the metadata associated with this file
//
// It logs any errors
func (o *FsObjectDropbox) deleteMetadata() {
fs.Debug(o, "Deleting metadata from datastore")
err := o.dropbox.deleteMetadata(o.metadataKey())
if err != nil {
fs.Log(o, "Error deleting metadata: %v", err)
fs.Stats.Error()
}
}
// Sets the modification time of the local fs object
//
// Commits the datastore
func (o *FsObjectDropbox) SetModTime(modTime time.Time) {
// FIXME not implemented
return
err := o.setModTimeAndMd5sum(modTime, "")
if err != nil {
fs.Stats.Error()
fs.Log(o, err.Error())
}
}
// Storable returns whether this object is storable
// Is this object storable
func (o *FsObjectDropbox) Storable() bool {
return true
}
@@ -643,31 +720,27 @@ func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
//
// The new object may have been created if an error is returned
func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) error {
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
fs.ErrorLog(o, "File name disallowed - not uploading")
return nil
}
entry, err := o.dropbox.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")
// Calculate md5sum as we upload it
hash := md5.New()
rc := &readCloser{in: io.TeeReader(in, hash)}
entry, err := o.dropbox.db.UploadByChunk(rc, uploadChunkSize, o.remotePath(), true, "")
if err != nil {
return fmt.Errorf("Upload failed: %s", err)
}
o.setMetadataFromEntry(entry)
return nil
md5sum := fmt.Sprintf("%x", hash.Sum(nil))
return o.setModTimeAndMd5sum(modTime, md5sum)
}
// Remove an object
func (o *FsObjectDropbox) Remove() error {
o.deleteMetadata()
_, err := o.dropbox.db.Delete(o.remotePath())
return err
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*FsDropbox)(nil)
_ fs.Copier = (*FsDropbox)(nil)
_ fs.Purger = (*FsDropbox)(nil)
_ fs.Mover = (*FsDropbox)(nil)
_ fs.DirMover = (*FsDropbox)(nil)
_ fs.Object = (*FsObjectDropbox)(nil)
)
var _ fs.Fs = &FsDropbox{}
var _ fs.Purger = &FsDropbox{}
var _ fs.Object = &FsObjectDropbox{}


@@ -1,56 +0,0 @@
// Test Dropbox filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package dropbox_test
import (
"testing"
"github.com/ncw/rclone/dropbox"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func init() {
fstests.NilObject = fs.Object((*dropbox.FsObjectDropbox)(nil))
fstests.RemoteName = "TestDropbox:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }


@@ -1,179 +0,0 @@
package dropbox
import (
"bytes"
"fmt"
"strings"
"github.com/ncw/rclone/fs"
"github.com/stacktic/dropbox"
)
type nameTreeNode struct {
// Map from lowercase directory name to tree node
Directories map[string]*nameTreeNode
// Map from file name (case sensitive) to dropbox entry
Files map[string]*dropbox.Entry
// Empty string if exact case is unknown or root node
CaseCorrectName string
}
// ------------------------------------------------------------
func newNameTreeNode(caseCorrectName string) *nameTreeNode {
return &nameTreeNode{
CaseCorrectName: caseCorrectName,
Directories: make(map[string]*nameTreeNode),
Files: make(map[string]*dropbox.Entry),
}
}
func newNameTree() *nameTreeNode {
return newNameTreeNode("")
}
func (tree *nameTreeNode) String() string {
if len(tree.CaseCorrectName) == 0 {
return "nameTreeNode/<root>"
}
return fmt.Sprintf("nameTreeNode/%q", tree.CaseCorrectName)
}
func (tree *nameTreeNode) getTreeNode(path string) *nameTreeNode {
if len(path) == 0 {
// no lookup required, just return root
return tree
}
current := tree
for _, component := range strings.Split(path, "/") {
if len(component) == 0 {
fs.Stats.Error()
fs.ErrorLog(tree, "getTreeNode: path component is empty (full path %q)", path)
return nil
}
lowercase := strings.ToLower(component)
lookup := current.Directories[lowercase]
if lookup == nil {
lookup = newNameTreeNode("")
current.Directories[lowercase] = lookup
}
current = lookup
}
return current
}
func (tree *nameTreeNode) PutCaseCorrectDirectoryName(parentPath string, caseCorrectDirectoryName string) {
if len(caseCorrectDirectoryName) == 0 {
fs.Stats.Error()
fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: empty caseCorrectDirectoryName is not allowed (parentPath: %q)", parentPath)
return
}
node := tree.getTreeNode(parentPath)
if node == nil {
return
}
lowerCaseDirectoryName := strings.ToLower(caseCorrectDirectoryName)
directory := node.Directories[lowerCaseDirectoryName]
if directory == nil {
directory = newNameTreeNode(caseCorrectDirectoryName)
node.Directories[lowerCaseDirectoryName] = directory
} else {
if len(directory.CaseCorrectName) > 0 {
fs.Stats.Error()
fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: directory %q is already exists under parent path %q", caseCorrectDirectoryName, parentPath)
return
}
directory.CaseCorrectName = caseCorrectDirectoryName
}
}
func (tree *nameTreeNode) PutFile(parentPath string, caseCorrectFileName string, dropboxEntry *dropbox.Entry) {
node := tree.getTreeNode(parentPath)
if node == nil {
return
}
if node.Files[caseCorrectFileName] != nil {
fs.Stats.Error()
fs.ErrorLog(tree, "PutFile: file %q is already exists at %q", caseCorrectFileName, parentPath)
return
}
node.Files[caseCorrectFileName] = dropboxEntry
}
func (tree *nameTreeNode) GetPathWithCorrectCase(path string) *string {
if path == "" {
empty := ""
return &empty
}
var result bytes.Buffer
current := tree
for _, component := range strings.Split(path, "/") {
if component == "" {
fs.Stats.Error()
fs.ErrorLog(tree, "GetPathWithCorrectCase: path component is empty (full path %q)", path)
return nil
}
lowercase := strings.ToLower(component)
current = current.Directories[lowercase]
if current == nil || current.CaseCorrectName == "" {
return nil
}
_, _ = result.WriteString("/")
_, _ = result.WriteString(current.CaseCorrectName)
}
resultString := result.String()
return &resultString
}
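As a worked example (mirroring the tests below): after
PutCaseCorrectDirectoryName("", "A") and PutCaseCorrectDirectoryName("a", "C"),
GetPathWithCorrectCase("a/c") yields "/A/C"; if the exact case of any
component is still unknown, it returns nil instead.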
type nameTreeFileWalkFunc func(caseCorrectFilePath string, entry *dropbox.Entry)
func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFileWalkFunc) {
var prefix string
if currentPath == "" {
prefix = ""
} else {
prefix = currentPath + "/"
}
for name, entry := range tree.Files {
walkFunc(prefix+name, entry)
}
for lowerCaseName, directory := range tree.Directories {
caseCorrectName := directory.CaseCorrectName
if caseCorrectName == "" {
fs.Stats.Error()
fs.ErrorLog(tree, "WalkFiles: exact name of the directory %q is unknown (parent path: %q)", lowerCaseName, currentPath)
continue
}
directory.walkFilesRec(prefix+caseCorrectName, walkFunc)
}
}
func (tree *nameTreeNode) WalkFiles(rootPath string, walkFunc nameTreeFileWalkFunc) {
node := tree.getTreeNode(rootPath)
if node == nil {
return
}
node.walkFilesRec(rootPath, walkFunc)
}


@@ -1,124 +0,0 @@
package dropbox
import (
"testing"
"github.com/ncw/rclone/fs"
dropboxapi "github.com/stacktic/dropbox"
)
func assert(t *testing.T, shouldBeTrue bool, failMessage string) {
if !shouldBeTrue {
t.Fatal(failMessage)
}
}
func TestPutCaseCorrectDirectoryName(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutCaseCorrectDirectoryName("a/b", "C")
assert(t, tree.CaseCorrectName == "", "Root CaseCorrectName should be empty")
a := tree.Directories["a"]
assert(t, a.CaseCorrectName == "", "CaseCorrectName at 'a' should be empty")
b := a.Directories["b"]
assert(t, b.CaseCorrectName == "", "CaseCorrectName at 'a/b' should be empty")
c := b.Directories["c"]
assert(t, c.CaseCorrectName == "C", "CaseCorrectName at 'a/b/c' should be 'C'")
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}
func TestPutCaseCorrectDirectoryNameEmptyComponent(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutCaseCorrectDirectoryName("/a", "C")
tree.PutCaseCorrectDirectoryName("b/", "C")
tree.PutCaseCorrectDirectoryName("a//b", "C")
assert(t, fs.Stats.GetErrors() == errors+3, "3 errors should be reported")
}
func TestPutCaseCorrectDirectoryNameEmptyParent(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutCaseCorrectDirectoryName("", "C")
c := tree.Directories["c"]
assert(t, c.CaseCorrectName == "C", "CaseCorrectName at 'c' should be 'C'")
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}
func TestGetPathWithCorrectCase(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutCaseCorrectDirectoryName("a", "C")
assert(t, tree.GetPathWithCorrectCase("a/c") == nil, "Path for 'a' should not be available")
tree.PutCaseCorrectDirectoryName("", "A")
assert(t, *tree.GetPathWithCorrectCase("a/c") == "/A/C", "Path for 'a/c' should be '/A/C'")
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}
func TestPutAndWalk(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
tree.PutCaseCorrectDirectoryName("", "A")
numCalled := 0
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
assert(t, entry.Path == "xxx", "entry.Path should be xxx")
numCalled++
}
tree.WalkFiles("", walkFunc)
assert(t, numCalled == 1, "walk func should be called only once")
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}
func TestPutAndWalkWithPrefix(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
tree.PutCaseCorrectDirectoryName("", "A")
numCalled := 0
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
assert(t, entry.Path == "xxx", "entry.Path should be xxx")
numCalled++
}
tree.WalkFiles("A", walkFunc)
assert(t, numCalled == 1, "walk func should be called only once")
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}
func TestPutAndWalkIncompleteTree(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
t.Fatal("Should not be called")
}
tree.WalkFiles("", walkFunc)
assert(t, fs.Stats.GetErrors() == errors+1, "One error should be reported")
}


@@ -7,108 +7,51 @@ import (
"fmt"
"io"
"log"
"sort"
"strings"
"sync"
"time"
"github.com/VividCortex/ewma"
"github.com/tsenart/tb"
)
// Globals
var (
Stats = NewStats()
tokenBucket *tb.Bucket
Stats = NewStats()
)
// Start the token bucket if necessary
func startTokenBucket() {
if bwLimit > 0 {
tokenBucket = tb.NewBucket(int64(bwLimit), 100*time.Millisecond)
Log(nil, "Starting bandwidth limiter at %vBytes/s", &bwLimit)
}
}
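The bucket is drained from Account.Read further down; as a minimal sketch of
the pattern only, assuming the tb.NewBucket/Wait API used here
(throttledReader is a hypothetical type, "io" assumed imported):
// throttledReader caps the throughput of the wrapped reader: each Read
// blocks until the bucket grants one token per byte read.
type throttledReader struct {
in     io.Reader
bucket *tb.Bucket
}
func (r *throttledReader) Read(p []byte) (n int, err error) {
n, err = r.in.Read(p)
r.bucket.Wait(int64(n)) // blocks until n tokens are available
return n, err
}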
// StringSet holds some strings
type StringSet map[string]bool
// stringSet holds a set of strings
type stringSet map[string]struct{}
// inProgress holds a synchronized map of in-progress transfers
type inProgress struct {
mu sync.Mutex
m map[string]*Account
}
// newInProgress makes a new inProgress object
func newInProgress() *inProgress {
return &inProgress{
m: make(map[string]*Account, Config.Transfers),
}
}
// set marks the name as in progress
func (ip *inProgress) set(name string, acc *Account) {
ip.mu.Lock()
defer ip.mu.Unlock()
ip.m[name] = acc
}
// clear marks the name as no longer in progress
func (ip *inProgress) clear(name string) {
ip.mu.Lock()
defer ip.mu.Unlock()
delete(ip.m, name)
}
// get gets the account for name, or nil if not found
func (ip *inProgress) get(name string) *Account {
ip.mu.Lock()
defer ip.mu.Unlock()
return ip.m[name]
}
// Strings returns all the strings in the stringSet
func (ss stringSet) Strings() []string {
// Strings returns all the strings in the StringSet
func (ss StringSet) Strings() []string {
strings := make([]string, 0, len(ss))
for name := range ss {
var out string
if acc := Stats.inProgress.get(name); acc != nil {
out = acc.String()
} else {
out = name
}
strings = append(strings, " * "+out)
for k := range ss {
strings = append(strings, k)
}
sorted := sort.StringSlice(strings)
sorted.Sort()
return sorted
return strings
}
// String returns all the file names in the stringSet joined by newline
func (ss stringSet) String() string {
return strings.Join(ss.Strings(), "\n")
// String returns all the strings in the StringSet joined by comma
func (ss StringSet) String() string {
return strings.Join(ss.Strings(), ", ")
}
// StatsInfo limits and accounts all transfers
// Stats limits and accounts all transfers
type StatsInfo struct {
lock sync.RWMutex
bytes int64
errors int64
checks int64
checking stringSet
checking StringSet
transfers int64
transferring stringSet
transferring StringSet
start time.Time
inProgress *inProgress
}
// NewStats creates an initialised StatsInfo
func NewStats() *StatsInfo {
return &StatsInfo{
checking: make(stringSet, Config.Checkers),
transferring: make(stringSet, Config.Transfers),
checking: make(StringSet, Config.Checkers),
transferring: make(StringSet, Config.Transfers),
start: time.Now(),
inProgress: newInProgress(),
}
}
@@ -117,30 +60,29 @@ func (s *StatsInfo) String() string {
s.lock.RLock()
defer s.lock.RUnlock()
dt := time.Now().Sub(s.start)
dtSeconds := dt.Seconds()
dt_seconds := dt.Seconds()
speed := 0.0
if dt > 0 {
speed = float64(s.bytes) / 1024 / dtSeconds
speed = float64(s.bytes) / 1024 / dt_seconds
}
dtRounded := dt - (dt % (time.Second / 10))
buf := &bytes.Buffer{}
fmt.Fprintf(buf, `
Transferred: %10d Bytes (%7.2f kByte/s)
Errors: %10d
Checks: %10d
Transferred: %10d
Elapsed time: %10v
Elapsed time: %v
`,
s.bytes, speed,
s.errors,
s.checks,
s.transfers,
dtRounded)
dt)
if len(s.checking) > 0 {
fmt.Fprintf(buf, "Checking:\n%s\n", s.checking)
fmt.Fprintf(buf, "Checking: %s\n", s.checking)
}
if len(s.transferring) > 0 {
fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring)
fmt.Fprintf(buf, "Transferring: %s\n", s.transferring)
}
return buf.String()
}
@@ -171,23 +113,6 @@ func (s *StatsInfo) GetErrors() int64 {
return s.errors
}
// ResetCounters sets the counters (bytes, checks, errors, transfers) to 0
func (s *StatsInfo) ResetCounters() {
s.lock.Lock()
defer s.lock.Unlock()
s.bytes = 0
s.errors = 0
s.checks = 0
s.transfers = 0
}
// ResetErrors sets the errors count to 0
func (s *StatsInfo) ResetErrors() {
s.lock.Lock()
defer s.lock.Unlock()
s.errors = 0
}
// Errored returns whether there have been any errors
func (s *StatsInfo) Errored() bool {
s.lock.RLock()
@@ -199,14 +124,14 @@ func (s *StatsInfo) Errored() bool {
func (s *StatsInfo) Error() {
s.lock.Lock()
defer s.lock.Unlock()
s.errors++
s.errors += 1
}
// Checking adds a check into the stats
func (s *StatsInfo) Checking(o Object) {
s.lock.Lock()
defer s.lock.Unlock()
s.checking[o.Remote()] = struct{}{}
s.checking[o.Remote()] = true
}
// DoneChecking removes a check from the stats
@@ -214,21 +139,14 @@ func (s *StatsInfo) DoneChecking(o Object) {
s.lock.Lock()
defer s.lock.Unlock()
delete(s.checking, o.Remote())
s.checks++
}
// GetTransfers reads the number of transfers
func (s *StatsInfo) GetTransfers() int64 {
s.lock.RLock()
defer s.lock.RUnlock()
return s.transfers
s.checks += 1
}
// Transferring adds a transfer into the stats
func (s *StatsInfo) Transferring(o Object) {
s.lock.Lock()
defer s.lock.Unlock()
s.transferring[o.Remote()] = struct{}{}
s.transferring[o.Remote()] = true
}
// DoneTransferring removes a transfer from the stats
@@ -236,182 +154,36 @@ func (s *StatsInfo) DoneTransferring(o Object) {
s.lock.Lock()
defer s.lock.Unlock()
delete(s.transferring, o.Remote())
s.transfers++
s.transfers += 1
}
// Account limits and accounts for one transfer
type Account struct {
// The mutex is to make sure Read() and Close() aren't called
// concurrently. Unfortunately the persistent connection loop
// in http transport calls Read() after Do() returns on
// CancelRequest so this race can happen when it apparently
// shouldn't.
mu sync.Mutex
in io.ReadCloser
size int64
name string
statmu sync.Mutex // Separate mutex for stat values.
bytes int64 // Total number of bytes read
start time.Time // Start time of first read
lpTime time.Time // Time of last average measurement
lpBytes int // Number of bytes read since last measurement
avg ewma.MovingAverage // Moving average of last few measurements
exit chan struct{} // channel that will be closed when transfer is finished
in io.ReadCloser
bytes int64
}
// NewAccount makes an Account reader for an object
func NewAccount(in io.ReadCloser, obj Object) *Account {
acc := &Account{
in: in,
size: obj.Size(),
name: obj.Remote(),
exit: make(chan struct{}),
avg: ewma.NewMovingAverage(),
lpTime: time.Now(),
}
go acc.averageLoop()
Stats.inProgress.set(acc.name, acc)
return acc
}
func (file *Account) averageLoop() {
tick := time.NewTicker(time.Second)
defer tick.Stop()
for {
select {
case now := <-tick.C:
file.statmu.Lock()
// Add average of last second.
elapsed := now.Sub(file.lpTime).Seconds()
avg := float64(file.lpBytes) / elapsed
file.avg.Add(avg)
file.lpBytes = 0
file.lpTime = now
// Unlock stats
file.statmu.Unlock()
case <-file.exit:
return
}
// NewAccount makes an Account reader
func NewAccount(in io.ReadCloser) *Account {
return &Account{
in: in,
}
}
// Read bytes from the object - see io.Reader
func (file *Account) Read(p []byte) (n int, err error) {
file.mu.Lock()
defer file.mu.Unlock()
// Set start time.
file.statmu.Lock()
if file.start.IsZero() {
file.start = time.Now()
}
file.statmu.Unlock()
n, err = file.in.Read(p)
// Update Stats
file.statmu.Lock()
file.lpBytes += n
file.bytes += int64(n)
file.statmu.Unlock()
Stats.Bytes(int64(n))
// Limit the transfer speed if required
if tokenBucket != nil {
tokenBucket.Wait(int64(n))
if err == io.EOF {
// FIXME Do something?
}
return
}
// Progress returns bytes read as well as the size.
// Size can be <= 0 if the size is unknown.
func (file *Account) Progress() (bytes, size int64) {
if file == nil {
return 0, 0
}
file.statmu.Lock()
defer file.statmu.Unlock()
bytes, size = file.bytes, file.size
if bytes > size {
size = 0
}
return bytes, size
}
// Speed returns the speed of the current file transfer
// in bytes per second, as well as an exponentially weighted moving average
// If no read has completed yet, 0 is returned for both values.
func (file *Account) Speed() (bps, current float64) {
if file == nil {
return 0, 0
}
file.statmu.Lock()
defer file.statmu.Unlock()
if file.bytes == 0 {
return 0, 0
}
// Calculate speed from first read.
total := float64(time.Now().Sub(file.start)) / float64(time.Second)
bps = float64(file.bytes) / total
current = file.avg.Value()
return
}
// ETA returns the ETA of the current operation,
// rounded to full seconds.
// If the ETA cannot be determined 'ok' returns false.
func (file *Account) ETA() (eta time.Duration, ok bool) {
if file == nil || file.size <= 0 {
return 0, false
}
file.statmu.Lock()
defer file.statmu.Unlock()
if file.bytes == 0 {
return 0, false
}
left := file.size - file.bytes
if left <= 0 {
return 0, true
}
avg := file.avg.Value()
if avg <= 0 {
return 0, false
}
seconds := float64(left) / file.avg.Value()
return time.Duration(time.Second * time.Duration(int(seconds))), true
}
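For example, with 30 MiB left to transfer and a moving average of 3 MiB/s,
seconds == 10 and ETA returns (10s, true).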
// String produces stats for this file
func (file *Account) String() string {
a, b := file.Progress()
avg, cur := file.Speed()
eta, etaok := file.ETA()
etas := "-"
if etaok {
if eta > 0 {
etas = fmt.Sprintf("%v", eta)
} else {
etas = "0s"
}
}
name := []rune(file.name)
if len(name) > 45 {
where := len(name) - 42
name = append([]rune{'.', '.', '.'}, name[where:]...)
}
if b <= 0 {
return fmt.Sprintf("%45s: avg:%7.1f, cur: %6.1f kByte/s. ETA: %s", string(name), avg/1024, cur/1024, etas)
}
return fmt.Sprintf("%45s: %2d%% done. avg: %6.1f, cur: %6.1f kByte/s. ETA: %s", string(name), int(100*float64(a)/float64(b)), avg/1024, cur/1024, etas)
}
// Close the object
func (file *Account) Close() error {
close(file.exit)
file.mu.Lock()
defer file.mu.Unlock()
Stats.inProgress.clear(file.name)
// FIXME do something?
return file.in.Close()
}


@@ -4,11 +4,8 @@ package fs
import (
"bufio"
"encoding/base64"
"fmt"
"log"
"math"
"net/http"
"os"
"os/user"
"path"
@@ -18,211 +15,55 @@ import (
"time"
"github.com/Unknwon/goconfig"
"github.com/mreiferson/go-httpclient"
"github.com/spf13/pflag"
"github.com/ogier/pflag"
)
const (
configFileName = ".rclone.conf"
)
// SizeSuffix is parsed by flag with k/M/G suffixes
type SizeSuffix int64
// Global
var (
// ConfigFile is the config file data structure
// Config file
ConfigFile *goconfig.ConfigFile
// HomeDir is the home directory of the user
// Home directory
HomeDir = configHome()
// ConfigPath points to the config file
// Config file path
ConfigPath = path.Join(HomeDir, configFileName)
// Config is the global config
// Global config
Config = &ConfigInfo{}
// Flags
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
checkSum = pflag.BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
sizeOnly = pflag.BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
timeout = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
dumpHeaders = pflag.BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
dumpBodies = pflag.BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
bwLimit SizeSuffix
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
)
func init() {
pflag.VarP(&bwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix k|M|G")
}
// Turn SizeSuffix into a string
func (x SizeSuffix) String() string {
scaled := float64(0)
suffix := ""
switch {
case x == 0:
return "0"
case x < 1024*1024:
scaled = float64(x) / 1024
suffix = "k"
case x < 1024*1024*1024:
scaled = float64(x) / 1024 / 1024
suffix = "M"
default:
scaled = float64(x) / 1024 / 1024 / 1024
suffix = "G"
}
if math.Floor(scaled) == scaled {
return fmt.Sprintf("%.0f%s", scaled, suffix)
}
return fmt.Sprintf("%.3f%s", scaled, suffix)
}
// Set a SizeSuffix
func (x *SizeSuffix) Set(s string) error {
if len(s) == 0 {
return fmt.Errorf("Empty string")
}
suffix := s[len(s)-1]
suffixLen := 1
var multiplier float64
switch suffix {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
suffixLen = 0
multiplier = 1 << 10
case 'k', 'K':
multiplier = 1 << 10
case 'm', 'M':
multiplier = 1 << 20
case 'g', 'G':
multiplier = 1 << 30
default:
return fmt.Errorf("Bad suffix %q", suffix)
}
s = s[:len(s)-suffixLen]
value, err := strconv.ParseFloat(s, 64)
if err != nil {
return err
}
if value < 0 {
return fmt.Errorf("Size can't be negative %q", s)
}
value *= multiplier
*x = SizeSuffix(value)
return nil
}
// Type of the value
func (x *SizeSuffix) Type() string {
return "int64"
}
// Check it satisfies the interface
var _ pflag.Value = (*SizeSuffix)(nil)
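A brief usage sketch (illustrative only; exampleSizeSuffix is a hypothetical
helper), following the rules above - bare numbers mean kBytes and the k/M/G
suffixes scale by powers of 1024:
func exampleSizeSuffix() {
var bw SizeSuffix
_ = bw.Set("10M") // 10 << 20 bytes
fmt.Println(int64(bw), bw.String()) // prints: 10485760 10M
_ = bw.Set("100") // bare numbers mean kBytes: 102400 bytes
}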
// Obscure a config value
func Obscure(x string) string {
y := []byte(x)
for i := range y {
y[i] ^= byte(i) ^ 0xAA
}
return base64.StdEncoding.EncodeToString(y)
}
// Reveal a config value
func Reveal(y string) string {
x, err := base64.StdEncoding.DecodeString(y)
if err != nil {
log.Fatalf("Failed to reveal %q: %v", y, err)
}
for i := range x {
x[i] ^= byte(i) ^ 0xAA
}
return string(x)
}
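Note that Obscure/Reveal are symmetric, reversible obfuscation (XOR with a
position-dependent byte, then base64) rather than encryption; a round-trip
sketch (exampleObscureRoundTrip and the value are hypothetical):
func exampleObscureRoundTrip() {
s := "app-secret" // hypothetical config value
fmt.Println(Reveal(Obscure(s)) == s) // prints: true
}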
// ConfigInfo is filesystem config options
// Filesystem config options
type ConfigInfo struct {
Verbose bool
Quiet bool
DryRun bool
CheckSum bool
SizeOnly bool
ModifyWindow time.Duration
Checkers int
Transfers int
ConnectTimeout time.Duration // Connect timeout
Timeout time.Duration // Data channel timeout
DumpHeaders bool
DumpBodies bool
Filter *Filter
}
// Transport returns an http.RoundTripper with the correct timeouts
func (ci *ConfigInfo) Transport() http.RoundTripper {
t := &httpclient.Transport{
Proxy: http.ProxyFromEnvironment,
MaxIdleConnsPerHost: ci.Checkers + ci.Transfers + 1,
// ConnectTimeout, if non-zero, is the maximum amount of time a dial will wait for
// a connect to complete.
ConnectTimeout: ci.ConnectTimeout,
// ResponseHeaderTimeout, if non-zero, specifies the amount of
// time to wait for a server's response headers after fully
// writing the request (including its body, if any). This
// time does not include the time to read the response body.
ResponseHeaderTimeout: ci.Timeout,
// RequestTimeout, if non-zero, specifies the amount of time for the entire
// request to complete (including all of the above timeouts + entire response body).
// This should never be less than the sum total of the above two timeouts.
//RequestTimeout: NOT SET,
// ReadWriteTimeout, if non-zero, will set a deadline for every Read and
// Write operation on the request connection.
ReadWriteTimeout: ci.Timeout,
}
if ci.DumpHeaders || ci.DumpBodies {
return NewLoggedTransport(t, ci.DumpBodies)
}
return t
}
// Client returns an http.Client with the correct timeouts
func (ci *ConfigInfo) Client() *http.Client {
return &http.Client{
Transport: ci.Transport(),
}
Verbose bool
Quiet bool
DryRun bool
ModifyWindow time.Duration
Checkers int
Transfers int
}
// Find the config directory
func configHome() string {
// Find the user's home directory
usr, err := user.Current()
if err == nil {
return usr.HomeDir
if err != nil {
log.Printf("Couldn't find home directory: %v", err)
return ""
}
// Fall back to reading $HOME - work around user.Current() not
// working for cross compiled binaries on OSX.
// https://github.com/golang/go/issues/6376
home := os.Getenv("HOME")
if home != "" {
return home
}
log.Printf("Couldn't find home directory or read HOME environment variable.")
log.Printf("Defaulting to storing config in current directory.")
log.Printf("Use -config flag to workaround.")
log.Printf("Error was: %v", err)
return ""
return usr.HomeDir
}
// LoadConfig loads the config file
// Loads the config file
func LoadConfig() {
// Read some flags if set
//
@@ -233,12 +74,6 @@ func LoadConfig() {
Config.Checkers = *checkers
Config.Transfers = *transfers
Config.DryRun = *dryRun
Config.Timeout = *timeout
Config.ConnectTimeout = *connectTimeout
Config.CheckSum = *checkSum
Config.SizeOnly = *sizeOnly
Config.DumpHeaders = *dumpHeaders
Config.DumpBodies = *dumpBodies
ConfigPath = *configFile
@@ -246,24 +81,15 @@ func LoadConfig() {
var err error
ConfigFile, err = goconfig.LoadConfigFile(ConfigPath)
if err != nil {
log.Printf("Failed to load config file %v - using defaults: %v", ConfigPath, err)
log.Printf("Failed to load config file %v - using defaults", ConfigPath)
ConfigFile, err = goconfig.LoadConfigFile(os.DevNull)
if err != nil {
log.Fatalf("Failed to read null config file: %v", err)
}
}
// Load filters
Config.Filter, err = NewFilter()
if err != nil {
log.Fatalf("Failed to load filters: %v", err)
}
// Start the token bucket limiter
startTokenBucket()
}
// SaveConfig saves configuration file.
// Save configuration file.
func SaveConfig() {
err := goconfig.SaveConfigFile(ConfigFile, ConfigPath)
if err != nil {
@@ -275,7 +101,7 @@ func SaveConfig() {
}
}
// ShowRemotes shows an overview of the config file
// Show an overview of the config file
func ShowRemotes() {
remotes := ConfigFile.GetSectionList()
if len(remotes) == 0 {
@@ -296,7 +122,7 @@ func ChooseRemote() string {
return Choose("remote", remotes, nil, false)
}
// ReadLine reads some input
// Read some input
func ReadLine() string {
buf := bufio.NewReader(os.Stdin)
line, err := buf.ReadString('\n')
@@ -328,7 +154,7 @@ func Command(commands []string) byte {
}
}
// Confirm asks the user for Yes or No and returns true or false
// Asks the user for Yes or No and returns true or false
func Confirm() bool {
return Command([]string{"yYes", "nNo"}) == 'y'
}
@@ -365,7 +191,7 @@ func Choose(what string, defaults, help []string, newOk bool) string {
}
}
// ShowRemote shows the contents of the remote
// Show the contents of the remote
func ShowRemote(name string) {
fmt.Printf("--------------------\n")
fmt.Printf("[%s]\n", name)
@@ -375,7 +201,7 @@ func ShowRemote(name string) {
fmt.Printf("--------------------\n")
}
// OkRemote prints the contents of the remote and ask if it is OK
// Print the contents of the remote and ask if it is OK
func OkRemote(name string) bool {
ShowRemote(name)
switch i := Command([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}); i {
@@ -392,7 +218,7 @@ func OkRemote(name string) bool {
return false
}
// RemoteConfig runs the config helper for the remote if needed
// Runs the config helper for the remote if needed
func RemoteConfig(name string) {
fmt.Printf("Remote config\n")
fsName := ConfigFile.MustValue(name, "type")
@@ -408,7 +234,7 @@ func RemoteConfig(name string) {
}
}
// ChooseOption asks the user to choose an option
// Choose an option
func ChooseOption(o *Option) string {
fmt.Println(o.Help)
if len(o.Examples) > 0 {
@@ -424,7 +250,7 @@ func ChooseOption(o *Option) string {
return ReadLine()
}
// NewRemote make a new remote from its name
// Make a new remote
func NewRemote(name string) {
fmt.Printf("What type of source is it?\n")
types := []string{}
@@ -448,7 +274,7 @@ func NewRemote(name string) {
EditRemote(name)
}
// EditRemote gets the user to edit a remote
// Edit a remote
func EditRemote(name string) {
ShowRemote(name)
fmt.Printf("Edit remote\n")
@@ -470,13 +296,13 @@ func EditRemote(name string) {
SaveConfig()
}
// DeleteRemote gets the user to delete a remote
// Delete a remote
func DeleteRemote(name string) {
ConfigFile.DeleteSection(name)
SaveConfig()
}
// EditConfig edits the config file interactively
// Edit the config file interactively
func EditConfig() {
for {
haveRemotes := len(ConfigFile.GetSectionList()) != 0
@@ -494,20 +320,9 @@ func EditConfig() {
name := ChooseRemote()
EditRemote(name)
case 'n':
nameLoop:
for {
fmt.Printf("name> ")
name := ReadLine()
switch {
case name == "":
fmt.Printf("Can't use empty name\n")
case isDriveLetter(name):
fmt.Printf("Can't use %q as it can be confused a drive letter\n", name)
default:
NewRemote(name)
break nameLoop
}
}
fmt.Printf("name> ")
name := ReadLine()
NewRemote(name)
case 'd':
name := ChooseRemote()
DeleteRemote(name)


@@ -1,75 +0,0 @@
package fs
import "testing"
func TestSizeSuffixString(t *testing.T) {
for _, test := range []struct {
in float64
want string
}{
{0, "0"},
{102, "0.100k"},
{1024, "1k"},
{1024 * 1024, "1M"},
{1024 * 1024 * 1024, "1G"},
{10 * 1024 * 1024 * 1024, "10G"},
{10.1 * 1024 * 1024 * 1024, "10.100G"},
} {
ss := SizeSuffix(test.in)
got := ss.String()
if test.want != got {
t.Errorf("Want %v got %v", test.want, got)
}
}
}
func TestSizeSuffixSet(t *testing.T) {
for i, test := range []struct {
in string
want int64
err bool
}{
{"0", 0, false},
{"0.1k", 102, false},
{"0.1", 102, false},
{"1K", 1024, false},
{"1", 1024, false},
{"2.5", 1024 * 2.5, false},
{"1M", 1024 * 1024, false},
{"1.g", 1024 * 1024 * 1024, false},
{"10G", 10 * 1024 * 1024 * 1024, false},
{"", 0, true},
{"1p", 0, true},
{"1.p", 0, true},
{"1p", 0, true},
{"-1K", 0, true},
} {
ss := SizeSuffix(0)
err := ss.Set(test.in)
if (err != nil) != test.err {
t.Errorf("%d: Expecting error %v but got error %v", i, test.err, err)
}
got := int64(ss)
if test.want != got {
t.Errorf("%d: Want %v got %v", i, test.want, got)
}
}
}
func TestReveal(t *testing.T) {
for _, test := range []struct {
in string
want string
}{
{"", ""},
{"2sTcyNrA", "potato"},
} {
got := Reveal(test.in)
if got != test.want {
t.Errorf("%q: want %q got %q", test.in, test.want, got)
}
if Obscure(got) != test.in {
t.Errorf("%q: wasn't bidirectional", test.in)
}
}
}


@@ -1,12 +0,0 @@
// +build !windows
package fs
// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
//
// On non windows platforms we don't have drive letters so we always
// return false
func isDriveLetter(name string) bool {
return false
}


@@ -1,13 +0,0 @@
// +build windows
package fs
// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
func isDriveLetter(name string) bool {
if len(name) != 1 {
return false
}
c := name[0]
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}


@@ -1,248 +0,0 @@
// Control the filtering of files
package fs
import (
"bufio"
"fmt"
"os"
"regexp"
"strings"
"github.com/spf13/pflag"
)
// Global
var (
// Flags
deleteExcluded = pflag.BoolP("delete-excluded", "", false, "Delete files on dest excluded from sync")
filterRule = pflag.StringP("filter", "f", "", "Add a file-filtering rule")
filterFrom = pflag.StringP("filter-from", "", "", "Read filtering patterns from a file")
excludeRule = pflag.StringP("exclude", "", "", "Exclude files matching pattern")
excludeFrom = pflag.StringP("exclude-from", "", "", "Read exclude patterns from file")
includeRule = pflag.StringP("include", "", "", "Include files matching pattern")
includeFrom = pflag.StringP("include-from", "", "", "Read include patterns from file")
filesFrom = pflag.StringP("files-from", "", "", "Read list of source-file names from file")
minSize SizeSuffix
maxSize SizeSuffix
dumpFilters = pflag.BoolP("dump-filters", "", false, "Dump the filters to the output")
//cvsExclude = pflag.BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
)
func init() {
pflag.VarP(&minSize, "min-size", "", "Don't transfer any file smaller than this in k or suffix k|M|G")
pflag.VarP(&maxSize, "max-size", "", "Don't transfer any file larger than this in k or suffix k|M|G")
}
// rule is one filter rule
type rule struct {
Include bool
Regexp *regexp.Regexp
}
// Match returns true if rule matches path
func (r *rule) Match(path string) bool {
return r.Regexp.MatchString(path)
}
// String the rule
func (r *rule) String() string {
c := "-"
if r.Include {
c = "+"
}
return fmt.Sprintf("%s %s", c, r.Regexp.String())
}
// filesMap describes the map of files to transfer
type filesMap map[string]struct{}
// Filter describes any filtering in operation
type Filter struct {
DeleteExcluded bool
MinSize int64
MaxSize int64
rules []rule
files filesMap
}
// NewFilter parses the command line options and creates a Filter object
func NewFilter() (f *Filter, err error) {
f = &Filter{
DeleteExcluded: *deleteExcluded,
MinSize: int64(minSize),
MaxSize: int64(maxSize),
}
if *includeRule != "" {
err = f.Add(true, *includeRule)
if err != nil {
return nil, err
}
// Add implicit exclude
err = f.Add(false, "*")
if err != nil {
return nil, err
}
}
if *includeFrom != "" {
err := forEachLine(*includeFrom, func(line string) error {
return f.Add(true, line)
})
if err != nil {
return nil, err
}
// Add implicit exclude
err = f.Add(false, "*")
if err != nil {
return nil, err
}
}
if *excludeRule != "" {
err = f.Add(false, *excludeRule)
if err != nil {
return nil, err
}
}
if *excludeFrom != "" {
err := forEachLine(*excludeFrom, func(line string) error {
return f.Add(false, line)
})
if err != nil {
return nil, err
}
}
if *filterRule != "" {
err = f.AddRule(*filterRule)
if err != nil {
return nil, err
}
}
if *filterFrom != "" {
err := forEachLine(*filterFrom, f.AddRule)
if err != nil {
return nil, err
}
}
if *filesFrom != "" {
err := forEachLine(*filesFrom, func(line string) error {
return f.AddFile(line)
})
if err != nil {
return nil, err
}
}
if *dumpFilters {
fmt.Println("--- start filters ---")
fmt.Println(f.DumpFilters())
fmt.Println("--- end filters ---")
}
return f, nil
}
// Add adds a filter rule with include or exclude status indicated
func (f *Filter) Add(Include bool, glob string) error {
re, err := globToRegexp(glob)
if err != nil {
return err
}
rule := rule{
Include: Include,
Regexp: re,
}
f.rules = append(f.rules, rule)
return nil
}
// AddRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
// + glob
// - glob
// !
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func (f *Filter) AddRule(rule string) error {
switch {
case rule == "!":
f.Clear()
return nil
case strings.HasPrefix(rule, "- "):
return f.Add(false, rule[2:])
case strings.HasPrefix(rule, "+ "):
return f.Add(true, rule[2:])
}
return fmt.Errorf("Malformed rule %q", rule)
}
// AddFile adds a single file to the files from list
func (f *Filter) AddFile(file string) error {
if f.files == nil {
f.files = make(filesMap)
}
file = strings.Trim(file, "/")
f.files[file] = struct{}{}
return nil
}
// Clear clears all the filter rules
func (f *Filter) Clear() {
f.rules = nil
}
// Include returns whether this object should be included into the
// sync or not
func (f *Filter) Include(remote string, size int64) bool {
// filesFrom takes precedence
if f.files != nil {
_, include := f.files[remote]
return include
}
if f.MinSize != 0 && size < f.MinSize {
return false
}
if f.MaxSize != 0 && size > f.MaxSize {
return false
}
for _, rule := range f.rules {
if rule.Match(remote) {
return rule.Include
}
}
return true
}
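// exampleFilter (name invented for illustration) sketches the rule
// semantics above: rules are tried in order, the first match wins, and
// unmatched paths are included by default, so the trailing "- *" makes
// the filter exclusive. Errors are ignored for brevity.
func exampleFilter() {
	f := &Filter{}
	_ = f.AddRule("+ *.jpg")
	_ = f.AddRule("- *")
	fmt.Println(f.Include("photo.jpg", 123)) // true
	fmt.Println(f.Include("notes.txt", 123)) // false
}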
// forEachLine calls fn on every line in the file pointed to by path
//
// It ignores empty lines and lines starting with '#' or ';'
func forEachLine(path string, fn func(string) error) (err error) {
in, err := os.Open(path)
if err != nil {
return err
}
defer checkClose(in, &err)
scanner := bufio.NewScanner(in)
for scanner.Scan() {
line := scanner.Text()
line = strings.TrimSpace(line)
if len(line) == 0 || line[0] == '#' || line[0] == ';' {
continue
}
err := fn(line)
if err != nil {
return err
}
}
return scanner.Err()
}
// DumpFilters dumps the filters in textual form, 1 per line
func (f *Filter) DumpFilters() string {
rules := []string{}
for _, rule := range f.rules {
rules = append(rules, rule.String())
}
return strings.Join(rules, "\n")
}


@@ -1,252 +0,0 @@
package fs
import (
"io/ioutil"
"os"
"strings"
"testing"
)
func TestNewFilterDefault(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
if f.DeleteExcluded != false {
t.Errorf("DeleteExcluded want false got %v", f.DeleteExcluded)
}
if f.MinSize != 0 {
t.Errorf("MinSize want 0 got %v", f.MinSize)
}
if f.MaxSize != 0 {
t.Errorf("MaxSize want 0 got %v", f.MaxSize)
}
if len(f.rules) != 0 {
t.Errorf("rules want non got %v", f.rules)
}
if f.files != nil {
t.Errorf("files want none got %v", f.files)
}
}
// return a pointer to the string
func stringP(s string) *string {
return &s
}
// testFile creates a temp file with the contents
func testFile(t *testing.T, contents string) *string {
out, err := ioutil.TempFile("", "filter_test")
if err != nil {
t.Fatal(err)
}
defer out.Close()
_, err = out.Write([]byte(contents))
if err != nil {
t.Fatal(err)
}
s := out.Name()
return &s
}
func TestNewFilterFull(t *testing.T) {
mins := int64(100 * 1024)
maxs := int64(1000 * 1024)
emptyString := ""
isFalse := false
isTrue := true
// Set up the input
deleteExcluded = &isTrue
filterRule = stringP("- filter1")
filterFrom = testFile(t, "#comment\n+ filter2\n- filter3\n")
excludeRule = stringP("exclude1")
excludeFrom = testFile(t, "#comment\nexclude2\nexclude3\n")
includeRule = stringP("include1")
includeFrom = testFile(t, "#comment\ninclude2\ninclude3\n")
filesFrom = testFile(t, "#comment\nfiles1\nfiles2\n")
minSize = SizeSuffix(mins)
maxSize = SizeSuffix(maxs)
rm := func(p string) {
err := os.Remove(p)
if err != nil {
t.Logf("error removing %q: %v", p, err)
}
}
// Reset the input
defer func() {
rm(*filterFrom)
rm(*excludeFrom)
rm(*includeFrom)
rm(*filesFrom)
minSize = 0
maxSize = 0
deleteExcluded = &isFalse
filterRule = &emptyString
filterFrom = &emptyString
excludeRule = &emptyString
excludeFrom = &emptyString
includeRule = &emptyString
includeFrom = &emptyString
filesFrom = &emptyString
}()
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
if f.DeleteExcluded != true {
t.Errorf("DeleteExcluded want true got %v", f.DeleteExcluded)
}
if f.MinSize != mins {
t.Errorf("MinSize want %v got %v", mins, f.MinSize)
}
if f.MaxSize != maxs {
t.Errorf("MaxSize want %v got %v", maxs, f.MaxSize)
}
got := f.DumpFilters()
want := `+ (^|/)include1$
- (^|/)[^/]*$
+ (^|/)include2$
+ (^|/)include3$
- (^|/)[^/]*$
- (^|/)exclude1$
- (^|/)exclude2$
- (^|/)exclude3$
- (^|/)filter1$
+ (^|/)filter2$
- (^|/)filter3$`
if got != want {
t.Errorf("rules want %s got %s", want, got)
}
if len(f.files) != 2 {
t.Errorf("files want 2 got %v", f.files)
}
for _, name := range []string{"files1", "files2"} {
_, ok := f.files[name]
if !ok {
t.Errorf("Didn't find file %q in f.files", name)
}
}
}
type includeTest struct {
in string
size int64
want bool
}
func testInclude(t *testing.T, f *Filter, tests []includeTest) {
for _, test := range tests {
got := f.Include(test.in, test.size)
if test.want != got {
t.Errorf("%q,%d: want %v got %v", test.in, test.size, test.want, got)
}
}
}
func TestNewFilterIncludeFiles(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
f.AddFile("file1.jpg")
f.AddFile("/file2.jpg")
testInclude(t, f, []includeTest{
{"file1.jpg", 0, true},
{"file2.jpg", 1, true},
{"potato/file2.jpg", 2, false},
{"file3.jpg", 3, false},
})
}
func TestNewFilterMinSize(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
f.MinSize = 100
testInclude(t, f, []includeTest{
{"file1.jpg", 100, true},
{"file2.jpg", 101, true},
{"potato/file2.jpg", 99, false},
})
}
func TestNewFilterMaxSize(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
f.MaxSize = 100
testInclude(t, f, []includeTest{
{"file1.jpg", 100, true},
{"file2.jpg", 101, false},
{"potato/file2.jpg", 99, true},
})
}
func TestNewFilterMatches(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
add := func(s string) {
err := f.AddRule(s)
if err != nil {
t.Fatal(err)
}
}
add("+ cleared")
add("!")
add("- file1.jpg")
add("+ file2.png")
add("+ *.jpg")
add("- *.png")
add("- /potato")
add("+ /sausage1")
add("+ /sausage2*")
add("+ /sausage3**")
add("- *")
testInclude(t, f, []includeTest{
{"cleared", 100, false},
{"file1.jpg", 100, false},
{"file2.png", 100, true},
{"afile2.png", 100, false},
{"file3.jpg", 101, true},
{"file4.png", 101, false},
{"potato", 101, false},
{"sausage1", 101, true},
{"sausage1/potato", 101, false},
{"sausage2potato", 101, true},
{"sausage2/potato", 101, false},
{"sausage3/potato", 101, true},
{"unicorn", 99, false},
})
}
func TestFilterForEachLine(t *testing.T) {
file := testFile(t, `; comment
one
# another comment
two
# indented comment
three
four
five
six `)
defer os.Remove(*file)
lines := []string{}
forEachLine(*file, func(s string) error {
lines = append(lines, s)
return nil
})
got := strings.Join(lines, ",")
want := "one,two,three,four,five,six"
if want != got {
t.Errorf("want %q got %q", want, got)
}
}

fs/fs.go

@@ -1,38 +1,29 @@
// Package fs is a generic file system interface for rclone object storage systems
// File system interface
package fs
import (
"fmt"
"io"
"log"
"path/filepath"
"regexp"
"time"
)
// Constants
const (
// UserAgent for Fs which can set it
// User agent for Fs which can set it
UserAgent = "rclone/" + Version
// ModTimeNotSupported is a very large precision value to show
// mod time isn't supported on this Fs
ModTimeNotSupported = 100 * 365 * 24 * time.Hour
)
// Globals
var (
// Filesystem registry
fsRegistry []*Info
// ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
ErrorNotFoundInConfigFile = fmt.Errorf("Didn't find section in config file")
ErrorCantCopy = fmt.Errorf("Can't copy object - incompatible remotes")
ErrorCantMove = fmt.Errorf("Can't move object - incompatible remotes")
ErrorCantDirMove = fmt.Errorf("Can't move directory - incompatible remotes")
ErrorDirExists = fmt.Errorf("Can't copy directory - destination already exists")
fsRegistry []*FsInfo
)
// Info information about a filesystem
type Info struct {
// Filesystem info
type FsInfo struct {
// Name of this fs
Name string
// Create a new file system. If root refers to an existing
@@ -45,7 +36,7 @@ type Info struct {
Options []Option
}
// Option describes an option for the config wizard
// An option for an Fs
type Option struct {
Name string
Help string
@@ -53,7 +44,7 @@ type Option struct {
Examples []OptionExample
}
// OptionExample describes an example for an Option
// An example for an option
type OptionExample struct {
Value string
Help string
@@ -62,28 +53,22 @@ type OptionExample struct {
// Register a filesystem
//
// Fs modules should use this in an init() function
func Register(info *Info) {
func Register(info *FsInfo) {
fsRegistry = append(fsRegistry, info)
}
// Fs is the interface a cloud storage system must provide
// A Filesystem, describes the local filesystem and the remote object store
type Fs interface {
// Name of the remote (as passed into NewFs)
Name() string
// Root of the remote (as passed into NewFs)
Root() string
// String returns a description of the FS
String() string
// List the Fs into a channel
List() ObjectsChan
// ListDir lists the Fs directories/buckets/containers into a channel
// List the Fs directories/buckets/containers into a channel
ListDir() DirChan
// NewFsObject finds the Object at remote. Returns nil if can't be found
// Find the Object at remote. Returns nil if can't be found
NewFsObject(remote string) Object
// Put in to the remote path with the modTime given of the given size
@@ -93,21 +78,18 @@ type Fs interface {
// nil and the error
Put(in io.Reader, remote string, modTime time.Time, size int64) (Object, error)
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
// Make the directory (container, bucket)
Mkdir() error
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
// Remove the directory (container, bucket) if empty
Rmdir() error
// Precision of the ModTimes in this Fs
Precision() time.Duration
}
// Object is a filesystem like object provided by an Fs
// A filesystem like object which can either be a remote object or a
// local file/directory
type Object interface {
// String returns a description of the Object
String() string
@@ -119,11 +101,9 @@ type Object interface {
Remote() string
// Md5sum returns the md5 checksum of the file
// If no Md5sum is available it returns ""
Md5sum() (string, error)
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
ModTime() time.Time
// SetModTime sets the metadata on the object to set the modification date
@@ -145,122 +125,30 @@ type Object interface {
Remove() error
}
// Purger is an optional interfaces for Fs
// Optional interfaces
type Purger interface {
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
Purge() error
}
// Copier is an optional interface for Fs
type Copier interface {
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
Copy(src Object, remote string) (Object, error)
}
// Mover is an optional interface for Fs
type Mover interface {
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
Move(src Object, remote string) (Object, error)
}
// DirMover is an optional interface for Fs
type DirMover interface {
// DirMove moves src to this remote using server side move
// operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
DirMove(src Fs) error
}
// Retry is optional interface for error as to whether the operation
// should be retried at a high level.
//
// This should be returned from Update or Put methods as required
type Retry interface {
error
Retry() bool
}
// retryError is a type of error
type retryError string
// Error interface
func (r retryError) Error() string {
return string(r)
}
// Retry interface
func (r retryError) Retry() bool {
return true
}
// Check interface
var _ Retry = retryError("")
// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...interface{}) error {
return retryError(fmt.Sprintf(format, a...))
}
// plainRetryError is an error wrapped so it will retry
type plainRetryError struct {
error
}
// Retry interface
func (err plainRetryError) Retry() bool {
return true
}
// Check interface
var _ Retry = plainRetryError{(error)(nil)}
// RetryError makes an error which indicates it would like to be retried
func RetryError(err error) error {
return plainRetryError{err}
}
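// exampleRetry (names invented for illustration) sketches how a remote
// can flag a failed Put or Update as retryable, and how a caller such
// as Copy can detect that via the Retry interface.
func exampleRetry() {
	err := RetryErrorf("temporary failure: %s", "rate limited")
	if r, ok := err.(Retry); ok && r.Retry() {
		fmt.Println("Worth retrying:", err)
	}
}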
// ObjectsChan is a channel of Objects
// A channel of Objects
type ObjectsChan chan Object
// Objects is a slice of Object~s
// A slice of Objects
type Objects []Object
// ObjectPair is a pair of Objects used to describe a potential copy
// operation.
// A pair of Objects
type ObjectPair struct {
src, dst Object
}
// ObjectPairChan is a channel of ObjectPair
// A channel of ObjectPair
type ObjectPairChan chan ObjectPair
// Dir describes a directory for directory/container/bucket lists
// A structure of directory/container/bucket lists
type Dir struct {
Name string // name of the directory
When time.Time // modification or creation time - IsZero for unknown
@@ -268,13 +156,16 @@ type Dir struct {
Count int64 // number of objects -1 for unknown
}
// DirChan is a channel of Dir objects
// A channel of Dir objects
type DirChan chan *Dir
// Find looks for an Info object for the name passed in
// Pattern to match a url
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)
// Finds a FsInfo object for the name passed in
//
// Services are looked up in the config file
func Find(name string) (*Info, error) {
func Find(name string) (*FsInfo, error) {
for _, item := range fsRegistry {
if item.Name == name {
return item, nil
@@ -283,69 +174,53 @@ func Find(name string) (*Info, error) {
return nil, fmt.Errorf("Didn't find filing system for %q", name)
}
// Pattern to match an rclone url
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
// The path is of the form service://path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
// Services are looked up in the config file
func NewFs(path string) (Fs, error) {
parts := matcher.FindStringSubmatch(path)
fsName, configName, fsPath := "local", "local", path
if parts != nil && !isDriveLetter(parts[1]) {
if parts != nil {
configName, fsPath = parts[1], parts[2]
var err error
fsName, err = ConfigFile.GetValue(configName, "type")
if err != nil {
return nil, ErrorNotFoundInConfigFile
return nil, fmt.Errorf("Didn't find section in config file for %q", configName)
}
}
fs, err := Find(fsName)
if err != nil {
return nil, err
}
// change native directory separators to / if there are any
fsPath = filepath.ToSlash(fsPath)
return fs.NewFs(configName, fsPath)
}
// OutputLog logs for an object
// Outputs log for object
func OutputLog(o interface{}, text string, args ...interface{}) {
description := ""
if o != nil {
description = fmt.Sprintf("%v: ", o)
if x, ok := o.(fmt.Stringer); ok {
description = x.String() + ": "
}
out := fmt.Sprintf(text, args...)
log.Print(description + out)
}
// Debug writes debuging output for this Object or Fs
// Write debuging output for this Object or Fs
func Debug(o interface{}, text string, args ...interface{}) {
if Config.Verbose {
OutputLog(o, text, args...)
}
}
// Log writes log output for this Object or Fs
// Write log output for this Object or Fs
func Log(o interface{}, text string, args ...interface{}) {
if !Config.Quiet {
OutputLog(o, text, args...)
}
}
// ErrorLog writes error log output for this Object or Fs. It
// unconditionally logs a message regardless of Config.Quiet or
// Config.Verbose.
func ErrorLog(o interface{}, text string, args ...interface{}) {
OutputLog(o, text, args...)
}
// checkClose is a utility function used to check the return from
// Close in a defer statement.
func checkClose(c io.Closer, err *error) {


@@ -1,117 +0,0 @@
// rsync style glob parser
package fs
import (
"bytes"
"fmt"
"regexp"
"strings"
)
// globToRegexp converts an rsync style glob to a regexp
//
// documented in filtering.md
func globToRegexp(glob string) (*regexp.Regexp, error) {
var re bytes.Buffer
if strings.HasPrefix(glob, "/") {
glob = glob[1:]
_, _ = re.WriteRune('^')
} else {
_, _ = re.WriteString("(^|/)")
}
consecutiveStars := 0
insertStars := func() error {
if consecutiveStars > 0 {
switch consecutiveStars {
case 1:
_, _ = re.WriteString(`[^/]*`)
case 2:
_, _ = re.WriteString(`.*`)
default:
return fmt.Errorf("too many stars in %q", glob)
}
}
consecutiveStars = 0
return nil
}
inBraces := false
inBrackets := 0
slashed := false
for _, c := range glob {
if slashed {
_, _ = re.WriteRune(c)
slashed = false
continue
}
if c != '*' {
err := insertStars()
if err != nil {
return nil, err
}
}
if inBrackets > 0 {
_, _ = re.WriteRune(c)
if c == '[' {
inBrackets++
}
if c == ']' {
inBrackets--
}
continue
}
switch c {
case '\\':
_, _ = re.WriteRune(c)
slashed = true
case '*':
consecutiveStars++
case '?':
_, _ = re.WriteString(`[^/]`)
case '[':
_, _ = re.WriteRune(c)
inBrackets++
case ']':
return nil, fmt.Errorf("mismatched ']' in glob %q", glob)
case '{':
if inBraces {
return nil, fmt.Errorf("can't nest '{' '}' in glob %q", glob)
}
inBraces = true
_, _ = re.WriteRune('(')
case '}':
if !inBraces {
return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
}
_, _ = re.WriteRune(')')
inBraces = false
case ',':
if inBraces {
_, _ = re.WriteRune('|')
} else {
_, _ = re.WriteRune(c)
}
case '.', '+', '(', ')', '|', '^', '$': // regexp meta characters not dealt with above
_, _ = re.WriteRune('\\')
_, _ = re.WriteRune(c)
default:
_, _ = re.WriteRune(c)
}
}
err := insertStars()
if err != nil {
return nil, err
}
if inBrackets > 0 {
return nil, fmt.Errorf("mismatched '[' and ']' in glob %q", glob)
}
if inBraces {
return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
}
_, _ = re.WriteRune('$')
result, err := regexp.Compile(re.String())
if err != nil {
return nil, fmt.Errorf("Bad glob pattern %q: %v (%q)", glob, err, re.String())
}
return result, nil
}


@@ -1,64 +0,0 @@
package fs
import (
"strings"
"testing"
)
func TestGlobToRegexp(t *testing.T) {
for _, test := range []struct {
in string
want string
error string
}{
{``, `(^|/)$`, ``},
{`potato`, `(^|/)potato$`, ``},
{`potato,sausage`, `(^|/)potato,sausage$`, ``},
{`/potato`, `^potato$`, ``},
{`potato?sausage`, `(^|/)potato[^/]sausage$`, ``},
{`potat[oa]`, `(^|/)potat[oa]$`, ``},
{`potat[a-z]or`, `(^|/)potat[a-z]or$`, ``},
{`potat[[:alpha:]]or`, `(^|/)potat[[:alpha:]]or$`, ``},
{`'.' '+' '(' ')' '|' '^' '$'`, `(^|/)'\.' '\+' '\(' '\)' '\|' '\^' '\$'$`, ``},
{`*.jpg`, `(^|/)[^/]*\.jpg$`, ``},
{`a{b,c,d}e`, `(^|/)a(b|c|d)e$`, ``},
{`potato**`, `(^|/)potato.*$`, ``},
{`potato**sausage`, `(^|/)potato.*sausage$`, ``},
{`*.p[lm]`, `(^|/)[^/]*\.p[lm]$`, ``},
{`[\[\]]`, `(^|/)[\[\]]$`, ``},
{`***potato`, `(^|/)`, `too many stars`},
{`***`, `(^|/)`, `too many stars`},
{`ab]c`, `(^|/)`, `mismatched ']'`},
{`ab[c`, `(^|/)`, `mismatched '[' and ']'`},
{`ab{{cd`, `(^|/)`, `can't nest`},
{`ab{}}cd`, `(^|/)`, `mismatched '{' and '}'`},
{`ab}c`, `(^|/)`, `mismatched '{' and '}'`},
{`ab{c`, `(^|/)`, `mismatched '{' and '}'`},
{`*.{jpg,png,gif}`, `(^|/)[^/]*\.(jpg|png|gif)$`, ``},
{`[a--b]`, `(^|/)`, `Bad glob pattern`},
{`a\*b`, `(^|/)a\*b$`, ``},
{`a\\b`, `(^|/)a\\b$`, ``},
} {
gotRe, err := globToRegexp(test.in)
if test.error == "" {
if err != nil {
t.Errorf("%q: not expecting error: %v", test.in, err)
} else {
got := gotRe.String()
if test.want != got {
t.Errorf("%q: want %q got %q", test.in, test.want, got)
}
}
} else {
if err == nil {
t.Errorf("%q: expecting error but didn't get one", test.in)
} else {
got := err.Error()
if !strings.Contains(got, test.error) {
t.Errorf("%q: want error %q got %q", test.in, test.error, got)
}
}
}
}
}


@@ -6,8 +6,7 @@ import (
"time"
)
// Limited defines a Fs which can only return the Objects passed in
// from the Fs passed in
// This defines a Limited Fs which can only return the Objects passed in from the Fs passed in
type Limited struct {
objects []Object
fs Fs
@@ -22,16 +21,6 @@ func NewLimited(fs Fs, objects ...Object) Fs {
return f
}
// Name is name of the remote (as passed into NewFs)
func (f *Limited) Name() string {
return f.fs.Name() // return name of underlying remote
}
// Root is the root of the remote (as passed into NewFs)
func (f *Limited) Root() string {
return f.fs.Root() // return root of underlying remote
}
// String returns a description of the FS
func (f *Limited) String() string {
return fmt.Sprintf("%s limited to %d objects", f.fs.String(), len(f.objects))
@@ -49,14 +38,14 @@ func (f *Limited) List() ObjectsChan {
return out
}
// ListDir lists the Fs directories/buckets/containers into a channel
// List the Fs directories/buckets/containers into a channel
func (f *Limited) ListDir() DirChan {
out := make(DirChan, Config.Checkers)
close(out)
return out
}
// NewFsObject finds the Object at remote. Returns nil if can't be found
// Find the Object at remote. Returns nil if can't be found
func (f *Limited) NewFsObject(remote string) Object {
for _, obj := range f.objects {
if obj.Remote() == remote {
@@ -79,16 +68,15 @@ func (f *Limited) Put(in io.Reader, remote string, modTime time.Time, size int64
return obj, obj.Update(in, modTime, size)
}
// Mkdir make the directory (container, bucket)
// Make the directory (container, bucket)
func (f *Limited) Mkdir() error {
// All directories are already made - just ignore
return nil
}
// Rmdir removes the directory (container, bucket) if empty
// Remove the directory (container, bucket) if empty
func (f *Limited) Rmdir() error {
// Ignore this in a limited fs
return nil
return fmt.Errorf("Can't rmdir in limited fs")
}
// Precision of the ModTimes in this Fs
@@ -96,23 +84,5 @@ func (f *Limited) Precision() time.Duration {
return f.fs.Precision()
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Limited) Copy(src Object, remote string) (Object, error) {
fCopy, ok := f.fs.(Copier)
if !ok {
return nil, ErrorCantCopy
}
return fCopy.Copy(src, remote)
}
// Check the interfaces are satisfied
var _ Fs = &Limited{}
var _ Copier = &Limited{}


@@ -1,57 +0,0 @@
// A logging http transport
package fs
import (
"log"
"net/http"
"net/http/httputil"
)
const (
separatorReq = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
separatorResp = "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
)
// LoggedTransport is an http transport which logs the traffic
type LoggedTransport struct {
wrapped http.RoundTripper
logBody bool
}
// NewLoggedTransport wraps the transport passed in and logs all roundtrips
// including the body if logBody is set.
func NewLoggedTransport(transport http.RoundTripper, logBody bool) *LoggedTransport {
return &LoggedTransport{
wrapped: transport,
logBody: logBody,
}
}
// CancelRequest cancels an in-flight request by closing its
// connection. CancelRequest should only be called after RoundTrip has
// returned.
func (t *LoggedTransport) CancelRequest(req *http.Request) {
if wrapped, ok := t.wrapped.(interface {
CancelRequest(*http.Request)
}); ok {
log.Printf("CANCEL REQUEST %v", req)
wrapped.CancelRequest(req)
}
}
// RoundTrip implements the RoundTripper interface.
func (t *LoggedTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
buf, _ := httputil.DumpRequestOut(req, t.logBody)
log.Println(separatorReq)
log.Println("HTTP REQUEST")
log.Println(string(buf))
log.Println(separatorReq)
resp, err = t.wrapped.RoundTrip(req)
if err != nil {
// don't try to dump a nil response if the round trip failed
return resp, err
}
buf, _ = httputil.DumpResponse(resp, t.logBody)
log.Println(separatorResp)
log.Println("HTTP RESPONSE")
log.Println(string(buf))
log.Println(separatorResp)
return resp, err
}
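// exampleLoggedClient (names invented for illustration) sketches
// wrapping a standard transport so that --dump-headers style logging
// applies to every request made through the client; pass true to also
// log the bodies as --dump-bodies does.
func exampleLoggedClient() {
	base := &http.Transport{Proxy: http.ProxyFromEnvironment}
	client := &http.Client{Transport: NewLoggedTransport(base, false)}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatalf("Request failed: %v", err)
	}
	_ = resp.Body.Close()
}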


@@ -4,16 +4,11 @@ package fs
import (
"fmt"
"io"
"mime"
"path"
"log"
"sync"
"sync/atomic"
"time"
)
// CalculateModifyWindow works out modify window for Fses passed in -
// sets Config.ModifyWindow
// Work out modify window for fses passed in - sets Config.ModifyWindow
//
// This is the largest modify window of all the fses in use, and the
// user configured value
@@ -24,69 +19,47 @@ func CalculateModifyWindow(fs ...Fs) {
if precision > Config.ModifyWindow {
Config.ModifyWindow = precision
}
if precision == ModTimeNotSupported {
Debug(f, "Modify window not supported")
return
}
}
}
Debug(fs[0], "Modify window is %s", Config.ModifyWindow)
Debug(fs[0], "Modify window is %s\n", Config.ModifyWindow)
}
// Md5sumsEqual checks to see if src == dst, but ignores empty strings
func Md5sumsEqual(src, dst string) bool {
if src == "" || dst == "" {
return true
}
return src == dst
}
// CheckMd5sums checks the two files to see if the MD5sums are the same
//
// Returns two bools, the first of which is equality and the second of
// which is true if either of the MD5SUMs were unset.
// Check the two files to see if the MD5sums are the same
//
// May return an error which will already have been logged
//
// If an error is returned it will return equal as false
func CheckMd5sums(src, dst Object) (equal bool, unset bool, err error) {
// If an error is returned it will return false
func CheckMd5sums(src, dst Object) (bool, error) {
srcMd5, err := src.Md5sum()
if err != nil {
Stats.Error()
ErrorLog(src, "Failed to calculate src md5: %s", err)
return false, false, err
}
if srcMd5 == "" {
return true, true, nil
Log(src, "Failed to calculate src md5: %s", err)
return false, err
}
dstMd5, err := dst.Md5sum()
if err != nil {
Stats.Error()
ErrorLog(dst, "Failed to calculate dst md5: %s", err)
return false, false, err
}
if dstMd5 == "" {
return true, true, nil
Log(dst, "Failed to calculate dst md5: %s", err)
return false, err
}
// Debug("Src MD5 %s", srcMd5)
// Debug("Dst MD5 %s", obj.Hash)
return Md5sumsEqual(srcMd5, dstMd5), false, nil
return srcMd5 == dstMd5, nil
}
// Equal checks to see if the src and dst objects are equal by looking at
// Checks to see if the src and dst objects are equal by looking at
// size, mtime and MD5SUM
//
// If the src and dst size are different then it is considered to be
// not equal. If --size-only is in effect then this is the only check
// that is done.
// not equal.
//
// If the size is the same and the mtime is the same then it is
// considered to be equal. This check is skipped if using --checksum.
// considered to be equal. This is the heuristic rsync uses when
// not using --checksum.
//
// If the size is the same and mtime is different, unreadable or
// --checksum is set and the MD5SUM is the same then the file is
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
// If the size is the same and mtime is different or unreadable
// and the MD5SUM is the same then the file is considered to be equal.
// In this case the mtime on the dst is updated.
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
@@ -95,75 +68,45 @@ func Equal(src, dst Object) bool {
Debug(src, "Sizes differ")
return false
}
if Config.SizeOnly {
Debug(src, "Sizes identical")
return true
}
var srcModTime time.Time
if !Config.CheckSum {
if Config.ModifyWindow == ModTimeNotSupported {
Debug(src, "Sizes identical")
return true
}
// Size the same so check the mtime
srcModTime = src.ModTime()
dstModTime := dst.ModTime()
dt := dstModTime.Sub(srcModTime)
ModifyWindow := Config.ModifyWindow
if dt >= ModifyWindow || dt <= -ModifyWindow {
Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
} else {
Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
return true
}
// Size the same so check the mtime
srcModTime := src.ModTime()
dstModTime := dst.ModTime()
dt := dstModTime.Sub(srcModTime)
ModifyWindow := Config.ModifyWindow
if dt >= ModifyWindow || dt <= -ModifyWindow {
Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
} else {
Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
return true
}
// mtime is unreadable or different but size is the same so
// check the MD5SUM
same, md5unset, _ := CheckMd5sums(src, dst)
same, _ := CheckMd5sums(src, dst)
if !same {
Debug(src, "Md5sums differ")
return false
}
if !Config.CheckSum {
// Size and MD5 the same but mtime different so update the
// mtime of the dst object here
dst.SetModTime(srcModTime)
}
// Size and MD5 the same but mtime different so update the
// mtime of the dst object here
dst.SetModTime(srcModTime)
if md5unset {
Debug(src, "Size of src and dst objects identical")
} else {
Debug(src, "Size and MD5SUM of src and dst objects identical")
}
Debug(src, "Size and MD5SUM of src and dst objects identical")
return true
}
// MimeType returns a guess at the mime type from the extension
func MimeType(o Object) string {
mimeType := mime.TypeByExtension(path.Ext(o.Remote()))
if mimeType == "" {
mimeType = "application/octet-stream"
}
return mimeType
}
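// An illustrative sketch of the guess above, assuming the standard
// mime table: for an Object whose Remote() is "photo.jpg",
//
//	MimeType(o) // typically "image/jpeg"
//
// while an unknown extension such as ".xyz" falls back to
// "application/octet-stream".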
// Used to remove a failed copy
//
// Returns whether the file was successfully removed or not
func removeFailedCopy(dst Object) bool {
if dst == nil {
return false
func removeFailedCopy(dst Object) {
if dst != nil {
Debug(dst, "Removing failed copy")
removeErr := dst.Remove()
if removeErr != nil {
Stats.Error()
Log(dst, "Failed to remove failed copy: %s", removeErr)
}
}
Debug(dst, "Removing failed copy")
removeErr := dst.Remove()
if removeErr != nil {
Debug(dst, "Failed to remove failed copy: %s", removeErr)
return false
}
return true
}
// Copy src object to dst or f if nil
@@ -172,60 +115,29 @@ func removeFailedCopy(dst Object) bool {
// call Copy() with dst nil on a pre-existing file then some filing
// systems (eg Drive) may duplicate the file.
func Copy(f Fs, dst, src Object) {
const maxTries = 10
tries := 0
doUpdate := dst != nil
var err, inErr error
tryAgain:
// Try server side copy first - if has optional interface and
// is same underlying remote
actionTaken := "Copied (server side copy)"
if fCopy, ok := f.(Copier); ok && src.Fs().Name() == f.Name() {
var newDst Object
newDst, err = fCopy.Copy(src, src.Remote())
if err == nil {
dst = newDst
}
} else {
err = ErrorCantCopy
in0, err := src.Open()
if err != nil {
Stats.Error()
Log(src, "Failed to open: %s", err)
return
}
// If can't server side copy, do it manually
if err == ErrorCantCopy {
var in0 io.ReadCloser
in0, err = src.Open()
if err != nil {
Stats.Error()
ErrorLog(src, "Failed to open: %s", err)
return
}
in := NewAccount(in0, src) // account the transfer
in := NewAccount(in0) // account the transfer
if doUpdate {
actionTaken = "Copied (updated existing)"
err = dst.Update(in, src.ModTime(), src.Size())
} else {
actionTaken = "Copied (new)"
dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
}
inErr = in.Close()
}
// Retry if err returned a retry error
if r, ok := err.(Retry); ok && r.Retry() && tries < maxTries {
tries++
Log(src, "Received error: %v - retrying %d/%d", err, tries, maxTries)
if removeFailedCopy(dst) {
// If we removed dst, then nil it out and note we are not updating
dst = nil
doUpdate = false
}
goto tryAgain
var actionTaken string
if dst != nil {
actionTaken = "Copied (updated existing)"
err = dst.Update(in, src.ModTime(), src.Size())
} else {
actionTaken = "Copied (new)"
dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
}
inErr := in.Close()
if err == nil {
err = inErr
}
if err != nil {
Stats.Error()
ErrorLog(src, "Failed to copy: %s", err)
Log(src, "Failed to copy: %s", err)
removeFailedCopy(dst)
return
}
@@ -234,29 +146,27 @@ tryAgain:
if src.Size() != dst.Size() {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
ErrorLog(dst, "%s", err)
Log(dst, "%s", err)
removeFailedCopy(dst)
return
}
// Verify md5sums are the same after transfer - ignoring blank md5sums
if !Config.SizeOnly {
srcMd5sum, md5sumErr := src.Md5sum()
srcMd5sum, md5sumErr := src.Md5sum()
if md5sumErr != nil {
Stats.Error()
Log(src, "Failed to read md5sum: %s", md5sumErr)
} else if srcMd5sum != "" {
dstMd5sum, md5sumErr := dst.Md5sum()
if md5sumErr != nil {
Stats.Error()
ErrorLog(src, "Failed to read md5sum: %s", md5sumErr)
} else if srcMd5sum != "" {
dstMd5sum, md5sumErr := dst.Md5sum()
if md5sumErr != nil {
Stats.Error()
ErrorLog(dst, "Failed to read md5sum: %s", md5sumErr)
} else if !Md5sumsEqual(srcMd5sum, dstMd5sum) {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
ErrorLog(dst, "%s", err)
removeFailedCopy(dst)
return
}
Log(dst, "Failed to read md5sum: %s", md5sumErr)
} else if dstMd5sum != "" && srcMd5sum != dstMd5sum {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
Log(dst, "%s", err)
removeFailedCopy(dst)
return
}
}
@@ -283,7 +193,7 @@ func checkOne(pair ObjectPair, out ObjectPairChan) {
out <- pair
}
// PairChecker reads Object~s on in and sends them to out if they need transferring.
// Read Object~s on in and send to out if they need uploading
//
// FIXME potentially doing lots of MD5SUMS at once
func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
@@ -296,8 +206,8 @@ func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
}
}
// PairCopier reads Objects on in and copies them.
func PairCopier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
// Read Objects on in and copy them
func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
defer wg.Done()
for pair := range in {
src := pair.src
@@ -311,49 +221,16 @@ func PairCopier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
}
}
// PairMover reads Objects on in and moves them if possible, or copies
// them if not
func PairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
defer wg.Done()
// See if we have Move available
fdstMover, haveMover := fdst.(Mover)
for pair := range in {
src := pair.src
dst := pair.dst
Stats.Transferring(src)
if Config.DryRun {
Debug(src, "Not moving as --dry-run")
} else if haveMover {
// Delete destination if it exists
if pair.dst != nil {
err := dst.Remove()
if err != nil {
Stats.Error()
ErrorLog(dst, "Couldn't delete: %v", err)
}
}
_, err := fdstMover.Move(src, src.Remote())
if err != nil {
Stats.Error()
ErrorLog(dst, "Couldn't move: %v", err)
} else {
Debug(src, "Moved")
}
} else {
Copy(fdst, pair.dst, src)
}
Stats.DoneTransferring(src)
}
}
// DeleteFiles removes all the files passed in the channel
func DeleteFiles(toBeDeleted ObjectsChan) {
// Delete all the files passed in the channel
func DeleteFiles(to_be_deleted ObjectsChan) {
var wg sync.WaitGroup
wg.Add(Config.Transfers)
var fs Fs
for i := 0; i < Config.Transfers; i++ {
go func() {
defer wg.Done()
for dst := range toBeDeleted {
for dst := range to_be_deleted {
fs = dst.Fs()
if Config.DryRun {
Debug(dst, "Not deleting as --dry-run")
} else {
@@ -362,7 +239,7 @@ func DeleteFiles(toBeDeleted ObjectsChan) {
Stats.DoneChecking(dst)
if err != nil {
Stats.Error()
ErrorLog(dst, "Couldn't delete: %s", err)
Log(dst, "Couldn't delete: %s", err)
} else {
Debug(dst, "Deleted")
}
@@ -370,40 +247,15 @@ func DeleteFiles(toBeDeleted ObjectsChan) {
}
}()
}
Log(nil, "Waiting for deletions to finish")
Log(fs, "Waiting for deletions to finish")
wg.Wait()
}
// Read a map of Object.Remote to Object for the given Fs
func readFilesMap(fs Fs) map[string]Object {
files := make(map[string]Object)
for o := range fs.List() {
remote := o.Remote()
if _, ok := files[remote]; !ok {
files[remote] = o
} else {
Log(o, "Duplicate file detected")
}
}
return files
}
// Same returns true if fdst and fsrc point to the same underlying Fs
func Same(fdst, fsrc Fs) bool {
return fdst.Name() == fsrc.Name() && fdst.Root() == fsrc.Root()
}
// Syncs fsrc into fdst
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc
//
// If DoMove is true then files will be moved instead of copied
func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
if Same(fdst, fsrc) {
ErrorLog(fdst, "Nothing to do as source and destination are the same")
return nil
}
func Sync(fdst, fsrc Fs, Delete bool) error {
err := fdst.Mkdir()
if err != nil {
Stats.Error()
@@ -414,60 +266,52 @@ func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
// Read the destination files first
// FIXME could do this in parallel and make it use less memory
delFiles := readFilesMap(fdst)
delFiles := make(map[string]Object)
for dst := range fdst.List() {
delFiles[dst.Remote()] = dst
}
// Read source files checking them off against dest files
toBeChecked := make(ObjectPairChan, Config.Transfers)
toBeUploaded := make(ObjectPairChan, Config.Transfers)
to_be_checked := make(ObjectPairChan, Config.Transfers)
to_be_uploaded := make(ObjectPairChan, Config.Transfers)
var checkerWg sync.WaitGroup
checkerWg.Add(Config.Checkers)
for i := 0; i < Config.Checkers; i++ {
go PairChecker(toBeChecked, toBeUploaded, &checkerWg)
go PairChecker(to_be_checked, to_be_uploaded, &checkerWg)
}
var copierWg sync.WaitGroup
copierWg.Add(Config.Transfers)
for i := 0; i < Config.Transfers; i++ {
if DoMove {
go PairMover(toBeUploaded, fdst, &copierWg)
} else {
go PairCopier(toBeUploaded, fdst, &copierWg)
}
go Copier(to_be_uploaded, fdst, &copierWg)
}
go func() {
for src := range fsrc.List() {
remote := src.Remote()
dst, dstFound := delFiles[remote]
if !Config.Filter.Include(remote, src.Size()) {
Debug(src, "Excluding from sync")
if dstFound && !Config.Filter.DeleteExcluded {
delete(delFiles, remote)
}
dst, found := delFiles[remote]
if found {
delete(delFiles, remote)
to_be_checked <- ObjectPair{src, dst}
} else {
if dstFound {
delete(delFiles, remote)
toBeChecked <- ObjectPair{src, dst}
} else {
// No need to check since doesn't exist
toBeUploaded <- ObjectPair{src, nil}
}
// No need to check since doesn't exist
to_be_uploaded <- ObjectPair{src, nil}
}
}
close(toBeChecked)
close(to_be_checked)
}()
Log(fdst, "Waiting for checks to finish")
checkerWg.Wait()
close(toBeUploaded)
close(to_be_uploaded)
Log(fdst, "Waiting for transfers to finish")
copierWg.Wait()
// Delete files if asked
if Delete {
if Stats.Errored() {
ErrorLog(fdst, "Not deleting files as there were IO errors")
Log(fdst, "Not deleting files as there were IO errors")
return nil
}
@@ -484,82 +328,41 @@ func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
return nil
}
// Sync fsrc into fdst
func Sync(fdst, fsrc Fs) error {
return syncCopyMove(fdst, fsrc, true, false)
}
// CopyDir copies fsrc into fdst
func CopyDir(fdst, fsrc Fs) error {
return syncCopyMove(fdst, fsrc, false, false)
}
// MoveDir moves fsrc into fdst
func MoveDir(fdst, fsrc Fs) error {
if Same(fdst, fsrc) {
ErrorLog(fdst, "Nothing to do as source and destination are the same")
return nil
}
// First attempt to use DirMover
if fdstDirMover, ok := fdst.(DirMover); ok && fsrc.Name() == fdst.Name() {
err := fdstDirMover.DirMove(fsrc)
Debug(fdst, "Using server side directory move")
switch err {
case ErrorCantDirMove, ErrorDirExists:
Debug(fdst, "Server side directory move failed - fallback to copy/delete: %v", err)
case nil:
Debug(fdst, "Server side directory move succeeded")
return nil
default:
Stats.Error()
ErrorLog(fdst, "Server side directory move failed: %v", err)
return err
}
}
// Now move the files
err := syncCopyMove(fdst, fsrc, false, true)
if err != nil || Stats.Errored() {
ErrorLog(fdst, "Not deleting files as there were IO errors")
return err
}
return Purge(fsrc)
}
// Check the files in fsrc and fdst according to Size and MD5SUM
// Checks the files in fsrc and fdst according to Size and MD5SUM
func Check(fdst, fsrc Fs) error {
Log(fdst, "Building file list")
// Read the destination files first
// FIXME could do this in parallel and make it use less memory
dstFiles := readFilesMap(fdst)
dstFiles := make(map[string]Object)
for dst := range fdst.List() {
dstFiles[dst.Remote()] = dst
}
// Read the source files checking them against dstFiles
// FIXME could do this in parallel and make it use less memory
srcFiles := readFilesMap(fsrc)
// Move all the common files into commonFiles and delete them
// from srcFiles and dstFiles
srcFiles := make(map[string]Object)
commonFiles := make(map[string][]Object)
for remote, src := range srcFiles {
for src := range fsrc.List() {
remote := src.Remote()
if dst, ok := dstFiles[remote]; ok {
commonFiles[remote] = []Object{dst, src}
delete(srcFiles, remote)
delete(dstFiles, remote)
} else {
srcFiles[remote] = src
}
}
Log(fdst, "%d files not in %v", len(dstFiles), fsrc)
for _, dst := range dstFiles {
Stats.Error()
ErrorLog(dst, "File not in %v", fsrc)
Log(dst, "File not in %v", fsrc)
}
Log(fsrc, "%d files not in %s", len(srcFiles), fdst)
for _, src := range srcFiles {
Stats.Error()
ErrorLog(src, "File not in %v", fdst)
Log(src, "File not in %v", fdst)
}
checks := make(chan []Object, Config.Transfers)
@@ -581,17 +384,17 @@ func Check(fdst, fsrc Fs) error {
if src.Size() != dst.Size() {
Stats.DoneChecking(src)
Stats.Error()
ErrorLog(src, "Sizes differ")
Log(src, "Sizes differ")
continue
}
same, _, err := CheckMd5sums(src, dst)
same, err := CheckMd5sums(src, dst)
Stats.DoneChecking(src)
if err != nil {
continue
}
if !same {
Stats.Error()
ErrorLog(src, "Md5sums differ")
Log(src, "Md5sums differ")
}
Debug(src, "OK")
}
@@ -607,7 +410,7 @@ func Check(fdst, fsrc Fs) error {
return nil
}
// ListFn lists the Fs to the supplied function
// List the Fs to the supplied function
//
// Lists in parallel which may get them out of order
func ListFn(f Fs, fn func(Object)) error {
@@ -626,79 +429,58 @@ func ListFn(f Fs, fn func(Object)) error {
return nil
}
// mutex for synchronized output
var outMutex sync.Mutex
// Synchronized fmt.Fprintf
//
// Ignores errors from Fprintf
func syncFprintf(w io.Writer, format string, a ...interface{}) {
outMutex.Lock()
defer outMutex.Unlock()
_, _ = fmt.Fprintf(w, format, a...)
}
// List the Fs to the supplied writer
// List the Fs to stdout
//
// Shows size and path
//
// Lists in parallel which may get them out of order
func List(f Fs, w io.Writer) error {
func List(f Fs) error {
return ListFn(f, func(o Object) {
syncFprintf(w, "%9d %s\n", o.Size(), o.Remote())
fmt.Printf("%9d %s\n", o.Size(), o.Remote())
})
}
// ListLong lists the Fs to the supplied writer
// List the Fs to stdout
//
// Shows size, mod time and path
//
// Lists in parallel which may get them out of order
func ListLong(f Fs, w io.Writer) error {
func ListLong(f Fs) error {
return ListFn(f, func(o Object) {
Stats.Checking(o)
modTime := o.ModTime()
Stats.DoneChecking(o)
syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote())
fmt.Printf("%9d %19s %s\n", o.Size(), modTime.Format("2006-01-02 15:04:05.00000000"), o.Remote())
})
}
// Md5sum list the Fs to the supplied writer
// List the Fs to stdout
//
// Produces the same output as the md5sum command
//
// Lists in parallel which may get them out of order
func Md5sum(f Fs, w io.Writer) error {
func Md5sum(f Fs) error {
return ListFn(f, func(o Object) {
Stats.Checking(o)
md5sum, err := o.Md5sum()
Stats.DoneChecking(o)
if err != nil {
Debug(o, "Failed to read MD5: %v", err)
md5sum = "ERROR"
md5sum = "UNKNOWN"
}
syncFprintf(w, "%32s %s\n", md5sum, o.Remote())
fmt.Printf("%32s %s\n", md5sum, o.Remote())
})
}
// Count counts the objects and their sizes in the Fs
func Count(f Fs) (objects int64, size int64, err error) {
err = ListFn(f, func(o Object) {
atomic.AddInt64(&objects, 1)
atomic.AddInt64(&size, o.Size())
})
return
}
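// exampleCount (name invented for illustration) sketches totalling up
// a remote with Count; the atomic adds above keep the counters correct
// even though ListFn lists in parallel.
func exampleCount(f Fs) {
	objects, size, err := Count(f)
	if err == nil {
		fmt.Printf("%d objects, %d bytes\n", objects, size)
	}
}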
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(f Fs, w io.Writer) error {
// List the directories/buckets/containers in the Fs to stdout
func ListDir(f Fs) error {
for dir := range f.ListDir() {
syncFprintf(w, "%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
fmt.Printf("%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
}
return nil
}
// Mkdir makes a destination directory or container
// Makes a destination directory or container
func Mkdir(f Fs) error {
err := f.Mkdir()
if err != nil {
@@ -708,7 +490,7 @@ func Mkdir(f Fs) error {
return nil
}
// Rmdir removes a container but not if not empty
// Removes a container but not if not empty
func Rmdir(f Fs) error {
if Config.DryRun {
Log(f, "Not deleting as dry run is set")
@@ -722,25 +504,24 @@ func Rmdir(f Fs) error {
return nil
}
// Purge removes a container and all of its contents
// Removes a container and all of its contents
//
// FIXME doesn't delete local directories
func Purge(f Fs) error {
var err error
if purger, ok := f.(Purger); ok {
if Config.DryRun {
Debug(f, "Not purging as --dry-run set")
} else {
err = purger.Purge()
err := purger.Purge()
if err != nil {
Stats.Error()
return err
}
}
} else {
// DeleteFiles and Rmdir observe --dry-run
DeleteFiles(f.List())
err = Rmdir(f)
}
if err != nil {
Stats.Error()
return err
log.Printf("Deleting path")
Rmdir(f)
}
return nil
}


@@ -1,616 +0,0 @@
// Test rclone by doing real transactions to a storage provider to and
// from the local disk
package fs_test
import (
"bytes"
"flag"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest"
// Active file systems
_ "github.com/ncw/rclone/amazonclouddrive"
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
)
// Globals
var (
localName, remoteName string
flocal, fremote fs.Fs
RemoteName = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
SubDir = flag.Bool("subdir", false, "Set to test with a sub directory")
Verbose = flag.Bool("verbose", false, "Set to enable logging")
finalise func()
)
// Write a file
func WriteFile(filePath, content string, t time.Time) {
// FIXME make directories?
filePath = path.Join(localName, filePath)
dirPath := path.Dir(filePath)
err := os.MkdirAll(dirPath, 0770)
if err != nil {
log.Fatalf("Failed to make directories %q: %v", dirPath, err)
}
err = ioutil.WriteFile(filePath, []byte(content), 0600)
if err != nil {
log.Fatalf("Failed to write file %q: %v", filePath, err)
}
err = os.Chtimes(filePath, t, t)
if err != nil {
log.Fatalf("Failed to chtimes file %q: %v", filePath, err)
}
}
var t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
var t2 = fstest.Time("2011-12-25T12:59:59.123456789Z")
var t3 = fstest.Time("2011-12-30T12:59:59.000000000Z")
func TestInit(t *testing.T) {
fs.LoadConfig()
fs.Config.Verbose = *Verbose
fs.Config.Quiet = !*Verbose
var err error
fremote, finalise, err = fstest.RandomRemote(*RemoteName, *SubDir)
if err != nil {
t.Fatalf("Failed to open remote %q: %v", *RemoteName, err)
}
t.Logf("Testing with remote %v", fremote)
localName, err = ioutil.TempDir("", "rclone")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
localName = filepath.ToSlash(localName)
t.Logf("Testing with local %q", localName)
flocal, err = fs.NewFs(localName)
if err != nil {
t.Fatalf("Failed to make %q: %v", remoteName, err)
}
}
func TestCalculateModifyWindow(t *testing.T) {
fs.CalculateModifyWindow(fremote, flocal)
t.Logf("ModifyWindow is %q", fs.Config.ModifyWindow)
}
func TestMkdir(t *testing.T) {
fstest.TestMkdir(t, fremote)
}
// Check dry run is working
func TestCopyWithDryRun(t *testing.T) {
WriteFile("sub dir/hello world", "hello world", t1)
fs.Config.DryRun = true
err := fs.CopyDir(fremote, flocal)
fs.Config.DryRun = false
if err != nil {
t.Fatalf("Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, []fstest.Item{}, fs.Config.ModifyWindow)
}
// Now without dry run
func TestCopy(t *testing.T) {
err := fs.CopyDir(fremote, flocal)
if err != nil {
t.Fatalf("Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Test a server side copy if possible, or the backup path if not
func TestServerSideCopy(t *testing.T) {
fremoteCopy, finaliseCopy, err := fstest.RandomRemote(*RemoteName, *SubDir)
if err != nil {
t.Fatalf("Failed to open remote copy %q: %v", *RemoteName, err)
}
defer finaliseCopy()
t.Logf("Server side copy (if possible) %v -> %v", fremote, fremoteCopy)
err = fs.CopyDir(fremoteCopy, fremote)
if err != nil {
t.Fatalf("Server Side Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremoteCopy, items, fs.Config.ModifyWindow)
}
func TestLsd(t *testing.T) {
var buf bytes.Buffer
err := fs.ListDir(fremote, &buf)
if err != nil {
t.Fatalf("ListDir failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, "sub dir\n") {
t.Fatalf("Result wrong %q", res)
}
}
// Now delete the local file and download it
func TestCopyAfterDelete(t *testing.T) {
err := os.Remove(localName + "/sub dir/hello world")
if err != nil {
t.Fatalf("Remove failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, []fstest.Item{}, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestCopyRedownload(t *testing.T) {
err := fs.CopyDir(flocal, fremote)
if err != nil {
t.Fatalf("Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
// Clean the directory
cleanTempDir(t)
}
// Create a file and sync it. Change the last modified date and resync.
// If we're only doing sync by size and checksum, we expect nothing
// to be transferred on the second sync.
func TestSyncBasedOnCheckSum(t *testing.T) {
cleanTempDir(t)
fs.Config.CheckSum = true
defer func() { fs.Config.CheckSum = false }()
WriteFile("check sum", "", t1)
local_items := []fstest.Item{
{Path: "check sum", Size: 0, ModTime: t1, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fs.Stats.ResetCounters()
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Initial sync failed: %v", err)
}
// We should have transferred exactly one file.
if fs.Stats.GetTransfers() != 1 {
t.Fatalf("Sync 1: want 1 transfer, got %d", fs.Stats.GetTransfers())
}
remote_items := local_items
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)
err = os.Chtimes(localName+"/check sum", t2, t2)
if err != nil {
t.Fatalf("Chtimes failed: %v", err)
}
local_items = []fstest.Item{
{Path: "check sum", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fs.Stats.ResetCounters()
err = fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
// We should have transferred no files
if fs.Stats.GetTransfers() != 0 {
t.Fatalf("Sync 2: want 0 transfers, got %d", fs.Stats.GetTransfers())
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)
cleanTempDir(t)
}
// Create a file and sync it. Change the last modified date and the
// file contents but not the size. If we're only doing sync by size
// only, we expect nothing to be transferred on the second sync.
func TestSyncSizeOnly(t *testing.T) {
cleanTempDir(t)
fs.Config.SizeOnly = true
defer func() { fs.Config.SizeOnly = false }()
WriteFile("sizeonly", "potato", t1)
local_items := []fstest.Item{
{Path: "sizeonly", Size: 6, ModTime: t1, Md5sum: "8ee2027983915ec78acc45027d874316"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fs.Stats.ResetCounters()
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Initial sync failed: %v", err)
}
// We should have transferred exactly one file.
if fs.Stats.GetTransfers() != 1 {
t.Fatalf("Sync 1: want 1 transfer, got %d", fs.Stats.GetTransfers())
}
remote_items := local_items
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)
// Update mtime, md5sum but not length of file
WriteFile("sizeonly", "POTATO", t2)
local_items = []fstest.Item{
{Path: "sizeonly", Size: 6, ModTime: t2, Md5sum: "8ac6f27a282e4938125482607ccfb55f"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fs.Stats.ResetCounters()
err = fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
// We should have transferred no files
if fs.Stats.GetTransfers() != 0 {
t.Fatalf("Sync 2: want 0 transfers, got %d", fs.Stats.GetTransfers())
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)
cleanTempDir(t)
}
func TestSyncAfterChangingModtimeOnly(t *testing.T) {
WriteFile("empty space", "", t1)
err := os.Chtimes(localName+"/empty space", t2, t2)
if err != nil {
t.Fatalf("Chtimes failed: %v", err)
}
err = fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestSyncAfterAddingAFile(t *testing.T) {
WriteFile("potato", "------------------------------------------------------------", t3)
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
WriteFile("potato", "smaller but same date", t3)
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Sync after changing a file's contents and modtime but not its length
func TestSyncAfterChangingContentsOnly(t *testing.T) {
if fremote.Precision() == fs.ModTimeNotSupported {
t.Logf("ModTimeNotSupported so forcing file to be a different size")
WriteFile("potato", "different size to make sure it syncs", t2)
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
}
WriteFile("potato", "SMALLER BUT SAME DATE", t2)
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Sync after removing a file and adding a file --dry-run
func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
WriteFile("potato2", "------------------------------------------------------------", t1)
err := os.Remove(localName + "/potato")
if err != nil {
t.Fatalf("Remove failed: %v", err)
}
fs.Config.DryRun = true
err = fs.Sync(fremote, flocal)
fs.Config.DryRun = false
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
before := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, before, fs.Config.ModifyWindow)
}
// Sync after removing a file and adding a file
func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Test with exclude
func TestSyncWithExclude(t *testing.T) {
WriteFile("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
fs.Config.Filter.MaxSize = 80
defer func() {
fs.Config.Filter.MaxSize = 0
}()
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Test with exclude and delete excluded
func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
fs.Config.Filter.MaxSize = 40
fs.Config.Filter.DeleteExcluded = true
reset := func() {
fs.Config.Filter.MaxSize = 0
fs.Config.Filter.DeleteExcluded = false
}
defer reset()
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
// Tidy up
reset()
err = os.Remove(localName + "/enormous")
if err != nil {
t.Fatalf("Remove failed: %v", err)
}
err = fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items = []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Test a server side move if possible, or the backup path if not
func TestServerSideMove(t *testing.T) {
fremoteMove, finaliseMove, err := fstest.RandomRemote(*RemoteName, *SubDir)
if err != nil {
t.Fatalf("Failed to open remote move %q: %v", *RemoteName, err)
}
defer finaliseMove()
t.Logf("Server side move (if possible) %v -> %v", fremote, fremoteMove)
// Start with a copy
err = fs.CopyDir(fremoteMove, fremote)
if err != nil {
t.Fatalf("Server Side Copy failed: %v", err)
}
// Remove one file
obj := fremoteMove.NewFsObject("potato2")
if obj == nil {
t.Fatalf("Failed to find potato2")
}
err = obj.Remove()
if err != nil {
t.Fatalf("Failed to remove object: %v", err)
}
// Do server side move
err = fs.MoveDir(fremoteMove, fremote)
if err != nil {
t.Fatalf("Server Side Move failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, fremote, items[:0], fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremoteMove, items, fs.Config.ModifyWindow)
// Move it back again, dst does not exist this time
err = fs.MoveDir(fremote, fremoteMove)
if err != nil {
t.Fatalf("Server Side Move 2 failed: %v", err)
}
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremoteMove, items[:0], fs.Config.ModifyWindow)
}
func TestLs(t *testing.T) {
var buf bytes.Buffer
err := fs.List(fremote, &buf)
if err != nil {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, " 0 empty space\n") {
t.Errorf("empty space missing: %q", res)
}
if !strings.Contains(res, " 60 potato2\n") {
t.Errorf("potato2 missing: %q", res)
}
}
func TestLsLong(t *testing.T) {
var buf bytes.Buffer
err := fs.ListLong(fremote, &buf)
if err != nil {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
lines := strings.Split(strings.Trim(res, "\n"), "\n")
if len(lines) != 2 {
t.Fatalf("Wrong number of lines in list: %q", lines)
}
timeFormat := "2006-01-02 15:04:05.000000000"
precision := fremote.Precision()
location := time.Now().Location()
checkTime := func(m, filename string, expected time.Time) {
modTime, err := time.ParseInLocation(timeFormat, m, location) // parse as localtime
if err != nil {
t.Errorf("Error parsing %q: %v", m, err)
} else {
dt, ok := fstest.CheckTimeEqualWithPrecision(expected, modTime, precision)
if !ok {
t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", filename, dt, precision, modTime, expected, precision)
}
}
}
m1 := regexp.MustCompile(`(?m)^ 0 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) empty space$`)
if ms := m1.FindStringSubmatch(res); ms == nil {
t.Errorf("empty space missing: %q", res)
} else {
checkTime(ms[1], "empty space", t2.Local())
}
m2 := regexp.MustCompile(`(?m)^ 60 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) potato2$`)
if ms := m2.FindStringSubmatch(res); ms == nil {
t.Errorf("potato2 missing: %q", res)
} else {
checkTime(ms[1], "potato2", t1.Local())
}
}
func TestMd5sum(t *testing.T) {
var buf bytes.Buffer
err := fs.Md5sum(fremote, &buf)
if err != nil {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") &&
!strings.Contains(res, " empty space\n") {
t.Errorf("empty space missing: %q", res)
}
if !strings.Contains(res, "6548b156ea68a4e003e786df99eee76 potato2\n") &&
!strings.Contains(res, " potato2\n") {
t.Errorf("potato2 missing: %q", res)
}
}
func TestCount(t *testing.T) {
objects, size, err := fs.Count(fremote)
if err != nil {
t.Fatalf("Count failed: %v", err)
}
if objects != 2 {
t.Errorf("want 2 objects got %d", objects)
}
if size != 60 {
t.Errorf("want size 60 got %d", size)
}
}
func TestCheck(t *testing.T) {
// FIXME
}
// Clean the temporary directory
func cleanTempDir(t *testing.T) {
t.Logf("Cleaning temporary directory: %q", localName)
err := os.RemoveAll(localName)
if err != nil {
t.Logf("Failed to remove %q: %v", localName, err)
}
}
func TestFinalise(t *testing.T) {
finalise()
cleanTempDir(t)
}

View File

@@ -1,34 +0,0 @@
#!/bin/bash
go install
REMOTES="
TestSwift:
TestS3:
TestDrive:
TestGoogleCloudStorage:
TestDropbox:
TestAmazonCloudDrive:
"
REMOTES="
TestAmazonCloudDrive:
"
function test_remote {
args=$@
echo "@go test $args"
go test $args || {
echo "*** test $args FAILED ***"
exit 1
}
}
test_remote
test_remote --subdir
for remote in $REMOTES; do
test_remote --remote $remote
test_remote --remote $remote --subdir
done
echo "All OK"

View File

@@ -1,4 +1,3 @@
package fs
// Version of rclone
const Version = "v1.23"
const Version = "v1.02"

View File

@@ -1,271 +0,0 @@
// Package fstest provides utilities for testing the Fs
package fstest
// FIXME put name of test FS in Fs structure
import (
"io/ioutil"
"log"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/ncw/rclone/fs"
)
// Seed the random number generator
func init() {
rand.Seed(time.Now().UnixNano())
}
// Item represents an item for checking
type Item struct {
Path string
Md5sum string
ModTime time.Time
Size int64
WinPath string
}
// CheckTimeEqualWithPrecision checks the times are equal within the
// precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
dt := t0.Sub(t1)
if dt >= precision || dt <= -precision {
return dt, false
}
return dt, true
}
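// Illustration of the precision window (a sketch, not part of this diff):
// two times 500ms apart compare equal at one second precision but not at
// one millisecond. Uses the Time helper defined later in this file.
func examplePrecisionWindow() {
    a := Time("2001-02-03T04:05:06.000000000Z")
    b := Time("2001-02-03T04:05:06.500000000Z")
    _, coarse := CheckTimeEqualWithPrecision(a, b, time.Second)    // true
    _, fine := CheckTimeEqualWithPrecision(a, b, time.Millisecond) // false
    log.Printf("coarse=%v fine=%v", coarse, fine)
}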
// CheckModTime checks the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
dt, ok := CheckTimeEqualWithPrecision(modTime, i.ModTime, precision)
if !ok {
t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision)
}
}
// Check checks all the attributes of the object are correct
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
if obj == nil {
t.Fatalf("Object is nil")
}
// Check attributes
Md5sum, err := obj.Md5sum()
if err != nil {
t.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
}
if !fs.Md5sumsEqual(i.Md5sum, Md5sum) {
t.Errorf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
}
if i.Size != obj.Size() {
t.Errorf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
}
i.CheckModTime(t, obj, obj.ModTime(), precision)
}
// Items represents all items for checking
type Items struct {
byName map[string]*Item
byNameAlt map[string]*Item
items []Item
}
// NewItems makes an Items
func NewItems(items []Item) *Items {
is := &Items{
byName: make(map[string]*Item),
byNameAlt: make(map[string]*Item),
items: items,
}
// Fill up byName
for i := range items {
is.byName[items[i].Path] = &items[i]
is.byNameAlt[items[i].WinPath] = &items[i]
}
return is
}
// Find checks off an item
func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
i, ok := is.byName[obj.Remote()]
if !ok {
i, ok = is.byNameAlt[obj.Remote()]
if !ok {
t.Errorf("Unexpected file %q", obj.Remote())
return
}
}
delete(is.byName, i.Path)
delete(is.byName, i.WinPath)
i.Check(t, obj, precision)
}
// Done checks all finished
func (is *Items) Done(t *testing.T) {
if len(is.byName) != 0 {
for name := range is.byName {
log.Printf("Not found %q", name)
}
t.Errorf("%d objects not found", len(is.byName))
}
}
// CheckListingWithPrecision checks the fs to see if it has the
// expected contents with the given precision.
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision time.Duration) {
is := NewItems(items)
oldErrors := fs.Stats.GetErrors()
var objs []fs.Object
for i := 1; i <= 5; i++ {
objs = nil
for obj := range f.List() {
objs = append(objs, obj)
}
if len(objs) == len(items) {
break
}
t.Logf("Sleeping for 1 second for list eventual consistency: %d/5", i)
time.Sleep(1 * time.Second)
}
for _, obj := range objs {
if obj == nil {
t.Errorf("Unexpected nil in List()")
continue
}
is.Find(t, obj, precision)
}
is.Done(t)
// Don't notice an error when listing an empty directory
if len(items) == 0 && oldErrors == 0 && fs.Stats.GetErrors() == 1 {
fs.Stats.ResetErrors()
}
}
// CheckListing checks the fs to see if it has the expected contents
func CheckListing(t *testing.T, f fs.Fs, items []Item) {
precision := f.Precision()
CheckListingWithPrecision(t, f, items, precision)
}
// Time parses a time string or logs a fatal error
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// RandomString creates a random string for test purposes
func RandomString(n int) string {
source := "abcdefghijklmnopqrstuvwxyz0123456789"
out := make([]byte, n)
for i := range out {
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// LocalRemote creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) {
path, err = ioutil.TempDir("", "rclone")
if err == nil {
// Now remove the directory
err = os.Remove(path)
}
path = filepath.ToSlash(path)
return
}
// RandomRemoteName makes a random bucket or subdirectory name
//
// Returns a random remote name plus the leaf name
func RandomRemoteName(remoteName string) (string, string, error) {
var err error
var leafName string
// Make a local directory if the remote name is empty
if remoteName == "" {
remoteName, err = LocalRemote()
if err != nil {
return "", "", err
}
} else {
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
leafName = RandomString(32)
remoteName += leafName
}
return remoteName, leafName, nil
}
// RandomRemote makes a random bucket or subdirectory on the remote
//
// Call the returned finalise function to Purge the fs at the end (and
// the parent if necessary)
func RandomRemote(remoteName string, subdir bool) (fs.Fs, func(), error) {
var err error
var parentRemote fs.Fs
remoteName, _, err = RandomRemoteName(remoteName)
if err != nil {
return nil, nil, err
}
if subdir {
parentRemote, err = fs.NewFs(remoteName)
if err != nil {
return nil, nil, err
}
remoteName += "/" + RandomString(8)
}
remote, err := fs.NewFs(remoteName)
if err != nil {
return nil, nil, err
}
finalise := func() {
_ = fs.Purge(remote) // ignore error
if parentRemote != nil {
err = fs.Purge(parentRemote) // ignore error
if err != nil {
log.Printf("Failed to purge %v: %v", parentRemote, err)
}
}
}
return remote, finalise, nil
}
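// Typical use of RandomRemote (an illustrative sketch): always arrange for
// the returned finalise to run so the random bucket or directory is purged
// even if the test bails out early. "TestS3:" is just an example name.
func exampleRandomRemote() {
    remote, finalise, err := RandomRemote("TestS3:", false)
    if err != nil {
        log.Fatalf("Failed to open remote: %v", err)
    }
    defer finalise()
    _ = remote // ... exercise the remote here ...
}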
// TestMkdir tests Mkdir works
func TestMkdir(t *testing.T, remote fs.Fs) {
err := fs.Mkdir(remote)
if err != nil {
t.Fatalf("Mkdir failed: %v", err)
}
CheckListing(t, remote, []Item{})
}
// TestPurge tests Purge works
func TestPurge(t *testing.T, remote fs.Fs) {
err := fs.Purge(remote)
if err != nil {
t.Fatalf("Purge failed: %v", err)
}
CheckListing(t, remote, []Item{})
}
// TestRmdir tests Rmdir works
func TestRmdir(t *testing.T, remote fs.Fs) {
err := fs.Rmdir(remote)
if err != nil {
t.Fatalf("Rmdir failed: %v", err)
}
}

View File

@@ -1,607 +0,0 @@
// Package fstests provides generic tests for testing the Fs and Object interfaces
//
// Run go generate to write the tests for the remotes
package fstests
//go:generate go run gen_tests.go
import (
"bytes"
"crypto/md5"
"encoding/hex"
"flag"
"io"
"log"
"os"
"strings"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest"
)
var (
remote fs.Fs
// RemoteName should be set to the name of the remote for testing
RemoteName = ""
subRemoteName = ""
subRemoteLeaf = ""
// NilObject should be set to a nil Object from the Fs under test
NilObject fs.Object
file1 = fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: "file name.txt",
}
file2 = fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`,
WinPath: `hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ _ _/z.txt`,
}
)
func init() {
flag.StringVar(&RemoteName, "remote", "", "Set this to override the default remote name (eg s3:)")
}
// TestInit tests basic initialisation
func TestInit(t *testing.T) {
var err error
fs.LoadConfig()
fs.Config.Verbose = false
fs.Config.Quiet = true
t.Logf("Using remote %q", RemoteName)
if RemoteName == "" {
RemoteName, err = fstest.LocalRemote()
if err != nil {
log.Fatalf("Failed to create tmp dir: %v", err)
}
}
subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(RemoteName)
if err != nil {
t.Fatalf("Couldn't make remote name: %v", err)
}
remote, err = fs.NewFs(subRemoteName)
if err == fs.ErrorNotFoundInConfigFile {
log.Printf("Didn't find %q in config file - skipping tests", RemoteName)
return
}
if err != nil {
t.Fatalf("Couldn't start FS: %v", err)
}
fstest.TestMkdir(t, remote)
}
func skipIfNotOk(t *testing.T) {
if remote == nil {
t.Skip("FS not configured")
}
}
// TestFsString tests the String method
func TestFsString(t *testing.T) {
skipIfNotOk(t)
str := remote.String()
if str == "" {
t.Fatal("Bad fs.String()")
}
}
// TestFsRmdirEmpty tests deleting an empty directory
func TestFsRmdirEmpty(t *testing.T) {
skipIfNotOk(t)
fstest.TestRmdir(t, remote)
}
// TestFsRmdirNotFound tests deleting a non-existent directory
func TestFsRmdirNotFound(t *testing.T) {
skipIfNotOk(t)
err := remote.Rmdir()
if err == nil {
t.Fatalf("Expecting error on Rmdir non existent")
}
}
// TestFsMkdir tests making a directory
func TestFsMkdir(t *testing.T) {
skipIfNotOk(t)
fstest.TestMkdir(t, remote)
fstest.TestMkdir(t, remote)
}
// TestFsListEmpty tests listing an empty directory
func TestFsListEmpty(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{})
}
// TestFsListDirEmpty tests listing the directories from an empty directory
func TestFsListDirEmpty(t *testing.T) {
skipIfNotOk(t)
for obj := range remote.ListDir() {
t.Errorf("Found unexpected item %q", obj.Name)
}
}
// TestFsNewFsObjectNotFound tests not finding an object
func TestFsNewFsObjectNotFound(t *testing.T) {
skipIfNotOk(t)
if remote.NewFsObject("potato") != nil {
t.Fatal("Didn't expect to find object")
}
}
func findObject(t *testing.T, Name string) fs.Object {
obj := remote.NewFsObject(Name)
if obj == nil {
t.Fatalf("Object not found: %q", Name)
}
return obj
}
func testPut(t *testing.T, file *fstest.Item) {
buf := bytes.NewBufferString(fstest.RandomString(100))
hash := md5.New()
in := io.TeeReader(buf, hash)
file.Size = int64(buf.Len())
obj, err := remote.Put(in, file.Path, file.ModTime, file.Size)
if err != nil {
t.Fatal("Put error", err)
}
file.Md5sum = hex.EncodeToString(hash.Sum(nil))
file.Check(t, obj, remote.Precision())
// Re-read the object and check again
obj = findObject(t, file.Path)
file.Check(t, obj, remote.Precision())
}
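// The io.TeeReader in testPut computes the MD5 while the bytes stream into
// Put, so the data is never buffered twice. The same trick standalone
// (illustrative sketch; md5WhileReading is a made-up name and needs
// "io/ioutil" imported for Discard):
func md5WhileReading(r io.Reader) (string, error) {
    hash := md5.New()
    tee := io.TeeReader(r, hash)
    if _, err := io.Copy(ioutil.Discard, tee); err != nil {
        return "", err
    }
    return hex.EncodeToString(hash.Sum(nil)), nil
}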
// TestFsPutFile1 tests putting a file
func TestFsPutFile1(t *testing.T) {
skipIfNotOk(t)
testPut(t, &file1)
}
// TestFsPutFile2 tests putting a file into a subdirectory
func TestFsPutFile2(t *testing.T) {
skipIfNotOk(t)
testPut(t, &file2)
}
// TestFsListDirFile2 tests the files are correctly uploaded
func TestFsListDirFile2(t *testing.T) {
skipIfNotOk(t)
found := false
for obj := range remote.ListDir() {
if obj.Name != `hello? sausage` && obj.Name != `hello_ sausage` {
t.Errorf("Found unexpected item %q", obj.Name)
} else {
found = true
}
}
if !found {
t.Errorf("Didn't find %q", `hello? sausage`)
}
}
// TestFsListDirRoot tests that ListDir works in the root
func TestFsListDirRoot(t *testing.T) {
skipIfNotOk(t)
rootRemote, err := fs.NewFs(RemoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", RemoteName, err)
}
found := false
for obj := range rootRemote.ListDir() {
if obj.Name == subRemoteLeaf {
found = true
}
}
if !found {
t.Errorf("Didn't find %q", subRemoteLeaf)
}
}
// TestFsListRoot tests List works in the root
func TestFsListRoot(t *testing.T) {
skipIfNotOk(t)
rootRemote, err := fs.NewFs(RemoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", RemoteName, err)
}
// Should either find file1 and file2 or nothing
found1 := false
f1 := subRemoteLeaf + "/" + file1.Path
found2 := false
f2 := subRemoteLeaf + "/" + file2.Path
f2Alt := subRemoteLeaf + "/" + file2.WinPath
count := 0
errors := fs.Stats.GetErrors()
for obj := range rootRemote.List() {
count++
if obj.Remote() == f1 {
found1 = true
}
if obj.Remote() == f2 || obj.Remote() == f2Alt {
found2 = true
}
}
errors -= fs.Stats.GetErrors()
if count == 0 {
if errors == 0 {
t.Error("Expecting error if count==0")
}
return
}
if found1 && found2 {
if errors != 0 {
t.Error("Not expecting error if found")
}
return
}
t.Errorf("Didn't find %q (%v) and %q (%v) or no files (count %d)", f1, found1, f2, found2, count)
}
// TestFsListFile1 tests file present
func TestFsListFile1(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}
// TestFsNewFsObject tests NewFsObject
func TestFsNewFsObject(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
file1.Check(t, obj, remote.Precision())
}
// TestFsListFile1and2 tests two files present
func TestFsListFile1and2(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}
// TestFsCopy tests Copy
func TestFsCopy(t *testing.T) {
skipIfNotOk(t)
// Check have Copy
_, ok := remote.(fs.Copier)
if !ok {
t.Skip("FS has no Copier interface")
}
var file1Copy = file1
file1Copy.Path += "-copy"
// do the copy
src := findObject(t, file1.Path)
dst, err := remote.(fs.Copier).Copy(src, file1Copy.Path)
if err != nil {
t.Errorf("Copy failed: %v", err)
}
// check file exists in new listing
fstest.CheckListing(t, remote, []fstest.Item{file1, file2, file1Copy})
// Check dst lightly - list above has checked ModTime/Md5sum
if dst.Remote() != file1Copy.Path {
t.Errorf("object path: want %q got %q", file1Copy.Path, dst.Remote())
}
// Delete copy
err = dst.Remove()
if err != nil {
t.Fatal("Remove copy error", err)
}
}
// TestFsMove tests Move
func TestFsMove(t *testing.T) {
skipIfNotOk(t)
// Check have Move
_, ok := remote.(fs.Mover)
if !ok {
t.Skip("FS has no Mover interface")
}
var file1Move = file1
file1Move.Path += "-move"
// do the move
src := findObject(t, file1.Path)
dst, err := remote.(fs.Mover).Move(src, file1Move.Path)
if err != nil {
t.Fatalf("Move failed: %v", err)
}
// check file exists in new listing
fstest.CheckListing(t, remote, []fstest.Item{file2, file1Move})
// Check dst lightly - list above has checked ModTime/Md5sum
if dst.Remote() != file1Move.Path {
t.Errorf("object path: want %q got %q", file1Move.Path, dst.Remote())
}
// move it back
src = findObject(t, file1Move.Path)
_, err = remote.(fs.Mover).Move(src, file1.Path)
if err != nil {
t.Errorf("Move failed: %v", err)
}
// check file exists in new listing
fstest.CheckListing(t, remote, []fstest.Item{file2, file1})
}
// Move src to this remote using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
// TestFsDirMove tests DirMove
func TestFsDirMove(t *testing.T) {
skipIfNotOk(t)
// Check have DirMove
_, ok := remote.(fs.DirMover)
if !ok {
t.Skip("FS has no DirMover interface")
}
// Check it can't move onto itself
err := remote.(fs.DirMover).DirMove(remote)
if err != fs.ErrorDirExists {
t.Errorf("Expecting fs.ErrorDirExists got: %v", err)
}
// new remote
newRemote, removeNewRemote, err := fstest.RandomRemote(RemoteName, false)
if err != nil {
t.Fatalf("Failed to create remote: %v", err)
}
defer removeNewRemote()
// try the move
err = newRemote.(fs.DirMover).DirMove(remote)
if err != nil {
t.Errorf("Failed to DirMove: %v", err)
}
// check remotes
// FIXME: Prints errors.
fstest.CheckListing(t, remote, []fstest.Item{})
fstest.CheckListing(t, newRemote, []fstest.Item{file2, file1})
// move it back
err = remote.(fs.DirMover).DirMove(newRemote)
if err != nil {
t.Errorf("Failed to DirMove: %v", err)
}
// check remotes
fstest.CheckListing(t, remote, []fstest.Item{file2, file1})
fstest.CheckListing(t, newRemote, []fstest.Item{})
}
// TestFsRmdirFull tests removing a non empty directory
func TestFsRmdirFull(t *testing.T) {
skipIfNotOk(t)
err := remote.Rmdir()
if err == nil {
t.Fatalf("Expecting error on RMdir on non empty remote")
}
}
// TestFsPrecision tests the Precision of the Fs
func TestFsPrecision(t *testing.T) {
skipIfNotOk(t)
precision := remote.Precision()
if precision == fs.ModTimeNotSupported {
return
}
if precision > time.Second || precision < 0 {
t.Fatalf("Precision out of range %v", precision)
}
// FIXME check expected precision
}
// TestObjectString tests the Object String method
func TestObjectString(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
s := obj.String()
if s != file1.Path {
t.Errorf("String() wrong %v != %v", s, file1.Path)
}
obj = NilObject
s = obj.String()
if s != "<nil>" {
t.Errorf("String() wrong %v != %v", s, "<nil>")
}
}
// TestObjectFs tests the object can be found
func TestObjectFs(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if obj.Fs() != remote {
t.Errorf("Fs is wrong %v != %v", obj.Fs(), remote)
}
}
// TestObjectRemote tests the Remote is correct
func TestObjectRemote(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if obj.Remote() != file1.Path {
t.Errorf("Remote is wrong %v != %v", obj.Remote(), file1.Path)
}
}
// TestObjectMd5sum tests the MD5SUM of the object is correct
func TestObjectMd5sum(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
Md5sum, err := obj.Md5sum()
if err != nil {
t.Errorf("Error in Md5sum: %v", err)
}
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}
// TestObjectModTime tests the ModTime of the object is correct
func TestObjectModTime(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
}
// TestObjectSetModTime tests that SetModTime works
func TestObjectSetModTime(t *testing.T) {
skipIfNotOk(t)
newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z")
obj := findObject(t, file1.Path)
obj.SetModTime(newModTime)
file1.ModTime = newModTime
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
// And make a new object and read it from there too
TestObjectModTime(t)
}
// TestObjectSize tests that Size works
func TestObjectSize(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if obj.Size() != file1.Size {
t.Errorf("Size is wrong %v != %v", obj.Size(), file1.Size)
}
}
// TestObjectOpen tests that Open works
func TestObjectOpen(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
in, err := obj.Open()
if err != nil {
t.Fatalf("Open() return error: %v", err)
}
hash := md5.New()
n, err := io.Copy(hash, in)
if err != nil {
t.Fatalf("io.Copy() return error: %v", err)
}
if n != file1.Size {
t.Fatalf("Read wrong number of bytes %d != %d", n, file1.Size)
}
err = in.Close()
if err != nil {
t.Fatalf("in.Close() return error: %v", err)
}
Md5sum := hex.EncodeToString(hash.Sum(nil))
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}
// TestObjectUpdate tests that Update works
func TestObjectUpdate(t *testing.T) {
skipIfNotOk(t)
buf := bytes.NewBufferString(fstest.RandomString(200))
hash := md5.New()
in := io.TeeReader(buf, hash)
file1.Size = int64(buf.Len())
obj := findObject(t, file1.Path)
err := obj.Update(in, file1.ModTime, file1.Size)
if err != nil {
t.Fatal("Update error", err)
}
file1.Md5sum = hex.EncodeToString(hash.Sum(nil))
file1.Check(t, obj, remote.Precision())
// Re-read the object and check again
obj = findObject(t, file1.Path)
file1.Check(t, obj, remote.Precision())
}
// TestObjectStorable tests that Storable works
func TestObjectStorable(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if !obj.Storable() {
t.Fatalf("Expecting %v to be storable", obj)
}
}
// TestLimitedFs tests that a LimitedFs is created
func TestLimitedFs(t *testing.T) {
skipIfNotOk(t)
remoteName := subRemoteName + "/" + file2.Path
file2Copy := file2
file2Copy.Path = "z.txt"
fileRemote, err := fs.NewFs(remoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", remoteName, err)
}
fstest.CheckListing(t, fileRemote, []fstest.Item{file2Copy})
_, ok := fileRemote.(*fs.Limited)
if !ok {
t.Errorf("%v is not a fs.Limited", fileRemote)
}
}
// TestLimitedFsNotFound tests that a LimitedFs is not created if the object is not found
func TestLimitedFsNotFound(t *testing.T) {
skipIfNotOk(t)
remoteName := subRemoteName + "/not found.txt"
fileRemote, err := fs.NewFs(remoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", remoteName, err)
}
fstest.CheckListing(t, fileRemote, []fstest.Item{})
_, ok := fileRemote.(*fs.Limited)
if ok {
t.Errorf("%v is is a fs.Limited", fileRemote)
}
}
// TestObjectRemove tests Remove
func TestObjectRemove(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
err := obj.Remove()
if err != nil {
t.Fatal("Remove error", err)
}
fstest.CheckListing(t, remote, []fstest.Item{file2})
}
// TestObjectPurge tests Purge
func TestObjectPurge(t *testing.T) {
skipIfNotOk(t)
fstest.TestPurge(t, remote)
err := fs.Purge(remote)
if err == nil {
t.Fatal("Expecting error after on second purge")
}
}
// TestFinalise tidies up after the previous tests
func TestFinalise(t *testing.T) {
skipIfNotOk(t)
if strings.HasPrefix(RemoteName, "/") {
// Remove temp directory
err := os.Remove(RemoteName)
if err != nil {
log.Printf("Failed to remove %q: %v\n", RemoteName, err)
}
}
}

View File

@@ -1,137 +0,0 @@
// +build ignore
// Make the test files from fstests.go
package main
import (
"bufio"
"html/template"
"log"
"os"
"os/exec"
"regexp"
"strings"
)
// Search fstests.go and return all the test function names
func findTestFunctions() []string {
fns := []string{}
matcher := regexp.MustCompile(`^func\s+(Test.*?)\(`)
in, err := os.Open("fstests.go")
if err != nil {
log.Fatalf("Couldn't open fstests.go: %v", err)
}
defer in.Close()
scanner := bufio.NewScanner(in)
for scanner.Scan() {
line := scanner.Text()
matches := matcher.FindStringSubmatch(line)
if len(matches) > 0 {
fns = append(fns, matches[1])
}
}
if err := scanner.Err(); err != nil {
log.Fatalf("Error scanning file: %v", err)
}
return fns
}
// Data to substitute
type Data struct {
Regenerate string
FsName string
UpperFsName string
TestName string
ObjectName string
Fns []string
}
var testProgram = `
// Test {{ .UpperFsName }} filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: {{ .Regenerate }}
package {{ .FsName }}_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/{{ .FsName }}"
)
func init() {
fstests.NilObject = fs.Object((*{{ .FsName }}.FsObject{{ .ObjectName }})(nil))
fstests.RemoteName = "{{ .TestName }}"
}
// Generic tests for the Fs
{{ range $fn := .Fns }}func {{ $fn }}(t *testing.T){ fstests.{{ $fn }}(t) }
{{ end }}
`
// Generate test file piping it through gofmt
func generateTestProgram(t *template.Template, fns []string, Fsname, ObjectName string) {
fsname := strings.ToLower(Fsname)
TestName := "Test" + Fsname + ":"
outfile := "../../" + fsname + "/" + fsname + "_test.go"
if fsname == "local" {
TestName = ""
}
data := Data{
Regenerate: "make gen_tests",
FsName: fsname,
UpperFsName: Fsname,
TestName: TestName,
ObjectName: ObjectName,
Fns: fns,
}
cmd := exec.Command("gofmt")
log.Printf("Writing %q", outfile)
out, err := os.Create(outfile)
if err != nil {
log.Fatal(err)
}
cmd.Stdout = out
gofmt, err := cmd.StdinPipe()
if err != nil {
log.Fatal(err)
}
if err = cmd.Start(); err != nil {
log.Fatal(err)
}
if err = t.Execute(gofmt, data); err != nil {
log.Fatal(err)
}
if err = gofmt.Close(); err != nil {
log.Fatal(err)
}
if err = cmd.Wait(); err != nil {
log.Fatal(err)
}
if err = out.Close(); err != nil {
log.Fatal(err)
}
}
func main() {
fns := findTestFunctions()
t := template.Must(template.New("main").Parse(testProgram))
generateTestProgram(t, fns, "Local", "Local")
generateTestProgram(t, fns, "Swift", "Swift")
generateTestProgram(t, fns, "S3", "S3")
generateTestProgram(t, fns, "Drive", "Drive")
generateTestProgram(t, fns, "GoogleCloudStorage", "Storage")
generateTestProgram(t, fns, "Dropbox", "Dropbox")
generateTestProgram(t, fns, "AmazonCloudDrive", "Acd")
log.Printf("Done")
}

138
googleauth/googleauth.go Normal file
View File

@@ -0,0 +1,138 @@
// Common authentication between Google Drive and Google Cloud Storage
package googleauth
import (
"encoding/json"
"fmt"
"log"
"net/http"
"code.google.com/p/goauth2/oauth"
"github.com/ncw/rclone/fs"
)
// A token cache which saves the token in the config file section named by the string
type TokenCache string
// Get the token from the config file - returns an error if it isn't present
func (name TokenCache) Token() (*oauth.Token, error) {
tokenString, err := fs.ConfigFile.GetValue(string(name), "token")
if err != nil {
return nil, err
}
if tokenString == "" {
return nil, fmt.Errorf("Empty token found - please reconfigure")
}
token := new(oauth.Token)
err = json.Unmarshal([]byte(tokenString), token)
if err != nil {
return nil, err
}
return token, nil
}
// Save the token to the config file
//
// This saves the config file if it changes
func (name TokenCache) PutToken(token *oauth.Token) error {
tokenBytes, err := json.Marshal(token)
if err != nil {
return err
}
tokenString := string(tokenBytes)
old := fs.ConfigFile.MustValue(string(name), "token")
if tokenString != old {
fs.ConfigFile.SetValue(string(name), "token", tokenString)
fs.SaveConfig()
}
return nil
}
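// Token and PutToken give TokenCache the same shape as goauth2's
// oauth.Cache interface, which is why it can be plugged in as the
// TokenCache of the oauth.Config below. A compile time check in the style
// this codebase uses elsewhere (illustrative; not in the original file):
var _ oauth.Cache = TokenCache("")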
// Auth contains information to authenticate an app against google services
type Auth struct {
Scope string
DefaultClientId string
DefaultClientSecret string
}
// Makes a new transport using authorisation from the config
//
// Doesn't have a token yet
func (auth *Auth) newTransport(name string) (*oauth.Transport, error) {
clientId := fs.ConfigFile.MustValue(name, "client_id")
if clientId == "" {
clientId = auth.DefaultClientId
}
clientSecret := fs.ConfigFile.MustValue(name, "client_secret")
if clientSecret == "" {
clientSecret = auth.DefaultClientSecret
}
// Settings for authorization.
var config = &oauth.Config{
ClientId: clientId,
ClientSecret: clientSecret,
Scope: auth.Scope,
RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
AuthURL: "https://accounts.google.com/o/oauth2/auth",
TokenURL: "https://accounts.google.com/o/oauth2/token",
TokenCache: TokenCache(name),
}
t := &oauth.Transport{
Config: config,
Transport: http.DefaultTransport,
}
return t, nil
}
// Makes a new transport using authorisation from the config with token
func (auth *Auth) NewTransport(name string) (*oauth.Transport, error) {
t, err := auth.newTransport(name)
if err != nil {
return nil, err
}
// Try to pull the token from the cache; if this fails, we need to get one.
token, err := t.Config.TokenCache.Token()
if err != nil {
return nil, fmt.Errorf("Failed to get token: %s", err)
}
t.Token = token
return t, nil
}
// Configuration helper - called after the user has put in the defaults
func (auth *Auth) Config(name string) {
// See if already have a token
tokenString := fs.ConfigFile.MustValue(name, "token")
if tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !fs.Confirm() {
return
}
}
// Get a transport
t, err := auth.newTransport(name)
if err != nil {
log.Fatalf("Couldn't make transport: %v", err)
}
// Generate a URL for the user to visit for authorization.
authUrl := t.Config.AuthCodeURL("state")
fmt.Printf("Go to the following link in your browser\n")
fmt.Printf("%s\n", authUrl)
fmt.Printf("Log in, then type paste the token that is returned in the browser here\n")
// Read the code, and exchange it for a token.
fmt.Printf("Enter verification code> ")
authCode := fs.ReadLine()
_, err = t.Exchange(authCode)
if err != nil {
log.Fatalf("Failed to get token: %v", err)
}
}

View File

@@ -1,4 +1,4 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage
// Google Cloud Storage interface
package googlecloudstorage
/*
@@ -17,59 +17,52 @@ import (
"encoding/hex"
"fmt"
"io"
"log"
"mime"
"net/http"
"path"
"regexp"
"strings"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/storage/v1"
"code.google.com/p/google-api-go-client/googleapi"
"code.google.com/p/google-api-go-client/storage/v1"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/googleauth"
)
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "8p/yms3OlNXE9OTDl/HLypf9gdiJ5cT3"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
rcloneClientId = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "X4Z3ca8xfWDb1Voo-F9a7ZxJ"
RFC3339In = time.RFC3339
RFC3339Out = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 256 // chunk size to read directory listings
)
var (
// Description of how to auth for this app
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageFullControlScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: fs.Reveal(rcloneClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
storageAuth = &googleauth.Auth{
Scope: storage.DevstorageFull_controlScope,
DefaultClientId: rcloneClientId,
DefaultClientSecret: rcloneClientSecret,
}
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
fs.Register(&fs.FsInfo{
Name: "google cloud storage",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config(name, storageConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
storageAuth.Config(name)
},
Options: []fs.Option{{
Name: oauthutil.ConfigClientID,
Help: "Google Application Client Id - leave blank normally.",
Name: "client_id",
Help: "Google Application Client Id - leave blank to use rclone's.",
}, {
Name: oauthutil.ConfigClientSecret,
Help: "Google Application Client Secret - leave blank normally.",
Name: "client_secret",
Help: "Google Application Client Secret - leave blank to use rclone's.",
}, {
Name: "project_number",
Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
@@ -120,7 +113,6 @@ func init() {
// FsStorage represents a remote storage server
type FsStorage struct {
name string // name of this remote
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
@@ -144,19 +136,6 @@ type FsObjectStorage struct {
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *FsStorage) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *FsStorage) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this FsStorage to a string
func (f *FsStorage) String() string {
if f.root == "" {
@@ -182,9 +161,9 @@ func parsePath(path string) (bucket, directory string, err error) {
// NewFs constructs an FsStorage from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
oAuthClient, err := oauthutil.NewClient(name, storageConfig)
t, err := storageAuth.NewTransport(name)
if err != nil {
log.Fatalf("Failed to configure Google Cloud Storage: %v", err)
return nil, err
}
bucket, directory, err := parsePath(root)
@@ -193,7 +172,6 @@ func NewFs(name, root string) (fs.Fs, error) {
}
f := &FsStorage{
name: name,
bucket: bucket,
root: directory,
projectNumber: fs.ConfigFile.MustValue(name, "project_number"),
@@ -208,7 +186,7 @@ func NewFs(name, root string) (fs.Fs, error) {
}
// Create a new authorized Drive client.
f.client = oAuthClient
f.client = t.Client()
f.svc, err = storage.New(f.client)
if err != nil {
return nil, fmt.Errorf("Couldn't create Google Cloud Storage client: %s", err)
@@ -237,7 +215,7 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsStorage) newFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
func (f *FsStorage) NewFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
o := &FsObjectStorage{
storage: f,
remote: remote,
@@ -254,11 +232,11 @@ func (f *FsStorage) newFsObjectWithInfo(remote string, info *storage.Object) fs.
return o
}
// NewFsObject returns an FsObject from a path
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsStorage) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
return f.NewFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
@@ -274,26 +252,19 @@ func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
objects, err := list.Do()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
return
}
if !directories {
for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, f.root) {
fs.Log(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
fn(remote, object)
for _, object := range objects.Items {
if directories && !strings.HasSuffix(object.Name, "/") {
continue
}
} else {
var object storage.Object
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
continue
}
fn(prefix[:len(prefix)-1], &object)
if !strings.HasPrefix(object.Name, f.root) {
fs.Log(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
fn(remote, object)
}
if objects.NextPageToken == "" {
break
@@ -302,20 +273,20 @@ func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
}
}
// List walks the path returning a channel of FsObjects
// Walk the path returning a channel of FsObjects
func (f *FsStorage) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
if f.bucket == "" {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
} else {
// List the objects
go func() {
defer close(out)
f.list(false, func(remote string, object *storage.Object) {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
@@ -324,7 +295,7 @@ func (f *FsStorage) List() fs.ObjectsChan {
return out
}
// ListDir lists the buckets
// Lists the buckets
func (f *FsStorage) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
if f.bucket == "" {
@@ -333,7 +304,7 @@ func (f *FsStorage) ListDir() fs.DirChan {
defer close(out)
if f.projectNumber == "" {
fs.Stats.Error()
fs.ErrorLog(f, "Can't list buckets without project number")
fs.Log(f, "Can't list buckets without project number")
return
}
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
@@ -341,7 +312,7 @@ func (f *FsStorage) ListDir() fs.DirChan {
buckets, err := listBuckets.Do()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list buckets: %v", err)
fs.Log(f, "Couldn't list buckets: %v", err)
break
} else {
for _, bucket := range buckets.Items {
@@ -381,8 +352,8 @@ func (f *FsStorage) ListDir() fs.DirChan {
// The new object may have been created if an error is returned
func (f *FsStorage) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
o := &FsObjectStorage{storage: f, remote: remote}
return o, o.Update(in, modTime, size)
fs := &FsObjectStorage{storage: f, remote: remote}
return fs, fs.Update(in, modTime, size)
}
// Mkdir creates the bucket if it doesn't exist
@@ -412,46 +383,14 @@ func (f *FsStorage) Rmdir() error {
return f.svc.Buckets.Delete(f.bucket).Do()
}
// Precision returns the precision
func (f *FsStorage) Precision() time.Duration {
// Return the precision
func (fs *FsStorage) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsStorage) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*FsObjectStorage)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary FsObject under construction
dstObj := &FsObjectStorage{storage: f, remote: remote}
srcBucket := srcObj.storage.bucket
srcObject := srcObj.storage.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
newObject, err := f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
if err != nil {
return nil, err
}
// Set the metadata for the new object while we have it
dstObj.setMetaData(newObject)
return dstObj, nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
// Return the parent Fs
func (o *FsObjectStorage) Fs() fs.Fs {
return o.storage
}
@@ -464,7 +403,7 @@ func (o *FsObjectStorage) String() string {
return o.remote
}
// Remote returns the remote path
// Return the remote path
func (o *FsObjectStorage) Remote() string {
return o.remote
}
@@ -495,16 +434,17 @@ func (o *FsObjectStorage) setMetaData(info *storage.Object) {
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(timeFormatIn, mtimeString)
modTime, err := time.Parse(RFC3339In, mtimeString)
if err == nil {
o.modTime = modTime
return
} else {
fs.Debug(o, "Failed to read mtime from metadata: %s", err)
}
fs.Debug(o, "Failed to read mtime from metadata: %s", err)
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormatIn, info.Updated)
modTime, err := time.Parse(RFC3339In, info.Updated)
if err != nil {
fs.Log(o, "Bad time decode: %v", err)
} else {
@@ -544,11 +484,11 @@ func (o *FsObjectStorage) ModTime() time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormatOut)
metadata[metaMtime] = modTime.Format(RFC3339Out)
return metadata
}
// SetModTime sets the modification time of the local fs object
// Sets the modification time of the local fs object
func (o *FsObjectStorage) SetModTime(modTime time.Time) {
// This only adds metadata so will preserve other metadata
object := storage.Object{
@@ -556,15 +496,14 @@ func (o *FsObjectStorage) SetModTime(modTime time.Time) {
Name: o.storage.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do()
_, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
fs.Log(o, "Failed to update remote mtime: %s", err)
}
o.setMetaData(newObject)
}
// Storable returns a boolean as to whether this object is storable
// Is this object storable
func (o *FsObjectStorage) Storable() bool {
return true
}
@@ -591,7 +530,7 @@ func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
return nil, err
}
if res.StatusCode != 200 {
_ = res.Body.Close() // ignore error
res.Body.Close()
return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
@@ -601,21 +540,24 @@ func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
//
// The new object may have been created if an error is returned
func (o *FsObjectStorage) Update(in io.Reader, modTime time.Time, size int64) error {
// Guess the content type
contentType := mime.TypeByExtension(path.Ext(o.remote))
if contentType == "" {
contentType = "application/octet-stream"
}
object := storage.Object{
Bucket: o.storage.bucket,
Name: o.storage.root + o.remote,
ContentType: fs.MimeType(o),
ContentType: contentType,
Size: uint64(size),
Updated: modTime.Format(timeFormatOut), // Doesn't get set
Updated: modTime.Format(RFC3339Out), // Doesn't get set
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.storage.svc.Objects.Insert(o.storage.bucket, &object).Media(in).Name(object.Name).PredefinedAcl(o.storage.objectAcl).Do()
if err != nil {
return err
}
// Set the metadata for the new object while we have it
o.setMetaData(newObject)
return nil
return err
}
// Remove an object
@@ -625,5 +567,4 @@ func (o *FsObjectStorage) Remove() error {
// Check the interfaces are satisfied
var _ fs.Fs = &FsStorage{}
var _ fs.Copier = &FsStorage{}
var _ fs.Object = &FsObjectStorage{}

View File

@@ -1,56 +0,0 @@
// Test GoogleCloudStorage filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package googlecloudstorage_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/googlecloudstorage"
)
func init() {
fstests.NilObject = fs.Object((*googlecloudstorage.FsObjectStorage)(nil))
fstests.RemoteName = "TestGoogleCloudStorage:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

View File

@@ -1,4 +1,4 @@
// Package local provides a filesystem interface
// Local filesystem interface
package local
import (
@@ -8,21 +8,19 @@ import (
"hash"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/ncw/rclone/fs"
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
fs.Register(&fs.FsInfo{
Name: "local",
NewFs: NewFs,
})
@@ -30,16 +28,14 @@ func init() {
// FsLocal represents a local filesystem rooted at root
type FsLocal struct {
name string // the name of the remote
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warned map[string]struct{} // whether we have warned about this string
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
}
// FsObjectLocal represents a local filesystem object
type FsObjectLocal struct {
local *FsLocal // The Fs this object is part of
local fs.Fs // The Fs this object is part of
remote string // The remote path
path string // The local path
info os.FileInfo // Interface for file info (always present)
@@ -48,22 +44,16 @@ type FsObjectLocal struct {
// ------------------------------------------------------------
// NewFs constructs an FsLocal from the path
// NewFs contstructs an FsLocal from the path
func NewFs(name, root string) (fs.Fs, error) {
var err error
f := &FsLocal{
name: name,
warned: make(map[string]struct{}),
}
f.root = filterPath(f.cleanUtf8(root))
root = path.Clean(root)
f := &FsLocal{root: root}
// Check to see if this points to a file
fi, err := os.Lstat(f.root)
if err == nil && fi.Mode().IsRegular() {
// It is a file, so use the parent as the root
var remote string
f.root, remote = getDirFile(f.root)
remote := path.Base(root)
f.root = path.Dir(root)
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
@@ -71,50 +61,34 @@ func NewFs(name, root string) (fs.Fs, error) {
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *FsLocal) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *FsLocal) Root() string {
return f.root
}
// String converts this FsLocal to a string
func (f *FsLocal) String() string {
return fmt.Sprintf("Local file system at %s", f.root)
}
// newFsObject makes a half completed FsObjectLocal
func (f *FsLocal) newFsObject(remote string) *FsObjectLocal {
remote = filepath.ToSlash(remote)
dstPath := filterPath(filepath.Join(f.root, f.cleanUtf8(remote)))
return &FsObjectLocal{local: f, remote: remote, path: dstPath}
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
o := f.newFsObject(remote)
func (f *FsLocal) NewFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
path := filepath.Join(f.root, remote)
o := &FsObjectLocal{local: f, remote: remote, path: path}
if info != nil {
o.info = info
} else {
err := o.lstat()
if err != nil {
fs.Debug(o, "Failed to stat %s: %s", o.path, err)
fs.Debug(o, "Failed to stat %s: %s", path, err)
return nil
}
}
return o
}
// NewFsObject returns an FsObject from a path
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsLocal) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
return f.NewFsObjectWithInfo(remote, nil)
}
// List the path returning a channel of FsObjects
@@ -126,19 +100,19 @@ func (f *FsLocal) List() fs.ObjectsChan {
err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
log.Printf("Failed to open directory: %s: %s", path, err)
} else {
remote, err := filepath.Rel(f.root, path)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Failed to get relative path %s: %s", path, err)
log.Printf("Failed to get relative path %s: %s", path, err)
return nil
}
if remote == "." {
return nil
// remote = ""
}
if fs := f.newFsObjectWithInfo(remote, fi); fs != nil {
if fs := f.NewFsObjectWithInfo(remote, fi); fs != nil {
if fs.Storable() {
out <- fs
}
@@ -148,54 +122,14 @@ func (f *FsLocal) List() fs.ObjectsChan {
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Failed to open directory: %s: %s", f.root, err)
log.Printf("Failed to open directory: %s: %s", f.root, err)
}
close(out)
}()
return out
}
// CleanUtf8 makes string a valid UTF-8 string
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
func (f *FsLocal) cleanUtf8(name string) string {
if !utf8.ValidString(name) {
if _, ok := f.warned[name]; !ok {
fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
name = string([]rune(name))
}
if runtime.GOOS == "windows" {
var name2 string
if strings.HasPrefix(name, `\\?\`) {
name2 = `\\?\`
name = strings.TrimPrefix(name, `\\?\`)
}
if strings.HasPrefix(name, `//?/`) {
name2 = `//?/`
name = strings.TrimPrefix(name, `//?/`)
}
name2 += strings.Map(func(r rune) rune {
switch r {
case '<', '>', '"', '|', '?', '*', '&':
return '_'
}
return r
}, name)
if name2 != name {
if _, ok := f.warned[name]; !ok {
fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
name = name2
}
}
return name
}
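// For example (a sketch of the two rewrites above):
//
//	f.cleanUtf8("b\xc3d") // "b\uFFFDd" - the bad byte becomes utf8.RuneError
//	f.cleanUtf8(`a<b>?c`) // "a_b__c" on Windows - reserved characters become '_'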
// ListDir walks the path returning a channel of FsObjects
// Walk the path returning a channel of FsObjects
func (f *FsLocal) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
@@ -203,31 +137,31 @@ func (f *FsLocal) ListDir() fs.DirChan {
items, err := ioutil.ReadDir(f.root)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find read directory: %s", err)
log.Printf("Couldn't find read directory: %s", err)
} else {
for _, item := range items {
if item.IsDir() {
dir := &fs.Dir{
Name: f.cleanUtf8(item.Name()),
Name: item.Name(),
When: item.ModTime(),
Bytes: 0,
Count: 0,
}
// Go down the tree to count the files and directories
dirpath := filterPath(filepath.Join(f.root, item.Name()))
dirpath := path.Join(f.root, item.Name())
err := filepath.Walk(dirpath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
log.Printf("Failed to open directory: %s: %s", path, err)
} else {
dir.Count++
dir.Count += 1
dir.Bytes += fi.Size()
}
return nil
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Failed to open directory: %s: %s", dirpath, err)
log.Printf("Failed to open directory: %s: %s", dirpath, err)
}
out <- dir
}
@@ -238,10 +172,11 @@ func (f *FsLocal) ListDir() fs.DirChan {
return out
}
// Put the FsObject to the local filesystem
// Puts the FsObject to the local filesystem
func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
dstPath := filepath.Join(f.root, remote)
// Temporary FsObject under construction - info filled in by Update()
o := f.newFsObject(remote)
o := &FsObjectLocal{local: f, remote: remote, path: dstPath}
err := o.Update(in, modTime, size)
if err != nil {
return nil, err
@@ -251,8 +186,7 @@ func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64
// Mkdir creates the directory if it doesn't exist
func (f *FsLocal) Mkdir() error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
return os.MkdirAll(f.root, 0777)
return os.MkdirAll(f.root, 0770)
}
// Rmdir removes the directory
@@ -262,7 +196,7 @@ func (f *FsLocal) Rmdir() error {
return os.Remove(f.root)
}
// Precision of the file system
// Return the precision
func (f *FsLocal) Precision() (precision time.Duration) {
f.precisionOk.Do(func() {
f.precision = f.readPrecision()
@@ -284,15 +218,12 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
}
path := fd.Name()
// fmt.Println("Created temp file", path)
err = fd.Close()
if err != nil {
return time.Second
}
fd.Close()
// Delete it on return
defer func() {
// fmt.Println("Remove temp file")
_ = os.Remove(path) // ignore error
os.Remove(path)
}()
// Find the minimum duration we can detect
@@ -328,104 +259,12 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsLocal) Purge() error {
fi, err := os.Lstat(f.root)
if err != nil {
return err
}
if !fi.Mode().IsDir() {
return fmt.Errorf("Can't Purge non directory: %q", f.root)
}
return os.RemoveAll(f.root)
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *FsLocal) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*FsObjectLocal)
if !ok {
fs.Debug(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Temporary FsObject under construction
dstObj := f.newFsObject(remote)
// Check it is a file if it exists
err := dstObj.lstat()
if os.IsNotExist(err) {
// OK
} else if err != nil {
return nil, err
} else if !dstObj.info.Mode().IsRegular() {
// It isn't a file
return nil, fmt.Errorf("Can't move file onto non-file")
}
// Create destination
err = dstObj.mkdirAll()
if err != nil {
return nil, err
}
// Do the move
err = os.Rename(srcObj.path, dstObj.path)
if err != nil {
return nil, err
}
// Update the info
err = dstObj.lstat()
if err != nil {
return nil, err
}
return dstObj, nil
}
// DirMove moves src directory to this remote using server side move
// operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *FsLocal) DirMove(src fs.Fs) error {
srcFs, ok := src.(*FsLocal)
if !ok {
fs.Debug(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
// Check if source exists
sstat, err := os.Lstat(srcFs.root)
if err != nil {
return err
}
// And is a directory
if !sstat.IsDir() {
return fs.ErrorCantDirMove
}
// Check if destination exists
_, err = os.Lstat(f.root)
if !os.IsNotExist(err) {
return fs.ErrorDirExists
}
// Do the move
return os.Rename(srcFs.root, f.root)
}
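// A sketch of the DirMove contract from the caller's side, with both
// remotes being *FsLocal as in the interface checks at the end of this file:
//
//	switch err := dstFs.DirMove(srcFs); err {
//	case nil: // whole tree renamed server side in one os.Rename
//	case fs.ErrorDirExists: // destination exists - caller merges instead
//	case fs.ErrorCantDirMove: // different remote types - fall back to copy
//	}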
// ------------------------------------------------------------
// Fs returns the parent Fs
// Return the parent Fs
func (o *FsObjectLocal) Fs() fs.Fs {
return o.local
}
@@ -438,9 +277,9 @@ func (o *FsObjectLocal) String() string {
return o.remote
}
// Remote returns the remote path
// Return the remote path
func (o *FsObjectLocal) Remote() string {
return o.local.cleanUtf8(o.remote)
return o.remote
}
// Md5sum calculates the Md5sum of a file returning a lowercase hex string
@@ -451,7 +290,7 @@ func (o *FsObjectLocal) Md5sum() (string, error) {
in, err := os.Open(o.path)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to open: %s", err)
fs.Log(o, "Failed to open: %s", err)
return "", err
}
hash := md5.New()
@@ -459,12 +298,12 @@ func (o *FsObjectLocal) Md5sum() (string, error) {
closeErr := in.Close()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to read: %s", err)
fs.Log(o, "Failed to read: %s", err)
return "", err
}
if closeErr != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to close: %s", closeErr)
fs.Log(o, "Failed to close: %s", closeErr)
return "", closeErr
}
o.md5sum = hex.EncodeToString(hash.Sum(nil))
@@ -481,29 +320,22 @@ func (o *FsObjectLocal) ModTime() time.Time {
return o.info.ModTime()
}
// SetModTime sets the modification time of the local fs object
// Sets the modification time of the local fs object
func (o *FsObjectLocal) SetModTime(modTime time.Time) {
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
fs.Debug(o, "Failed to set mtime on file: %s", err)
return
}
// Re-read metadata
err = o.lstat()
if err != nil {
fs.Debug(o, "Failed to stat: %s", err)
return
}
}
// Storable returns a boolean showing if this object is storable
// Is this object storable
func (o *FsObjectLocal) Storable() bool {
mode := o.info.Mode()
if mode&(os.ModeSymlink|os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
fs.Debug(o, "Can't transfer non file/directory")
return false
} else if mode&os.ModeDir != 0 {
// fs.Debug(o, "Skipping directory")
fs.Debug(o, "FIXME Skipping directory")
return false
}
return true
@@ -553,15 +385,10 @@ func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
return
}
// mkdirAll makes all the directories needed to store the object
func (o *FsObjectLocal) mkdirAll() error {
dir, _ := getDirFile(o.path)
return os.MkdirAll(dir, 0777)
}
// Update the object from in with modTime and size
func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) error {
err := o.mkdirAll()
dir := path.Dir(o.path)
err := os.MkdirAll(dir, 0770)
if err != nil {
return err
}
@@ -606,67 +433,7 @@ func (o *FsObjectLocal) Remove() error {
return os.Remove(o.path)
}
// Return the current directory and file from a path
// Assumes os.PathSeparator is used.
func getDirFile(s string) (string, string) {
i := strings.LastIndex(s, string(os.PathSeparator))
return s[:i], s[i+1:]
}
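// For example, on a system where os.PathSeparator is '/':
//
//	dir, file := getDirFile("/data/photos/cat.jpg") // "/data/photos", "cat.jpg"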
func filterPath(s string) string {
s = filepath.Clean(s)
if runtime.GOOS == "windows" {
s = strings.Replace(s, `/`, `\`, -1)
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
// Convert to UNC
return uncPath(s)
}
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
return s
}
// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(s string) string {
// UNC can NOT use "/", so convert all to "\"
s = strings.Replace(s, `/`, `\`, -1)
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(s, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(s, `\\?\`) {
return s
}
// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
}
if isAbsWinDrive.MatchString(s) {
return `\\?\` + s
}
return s
}
// Check the interfaces are satisfied
var _ fs.Fs = &FsLocal{}
var _ fs.Purger = &FsLocal{}
var _ fs.Mover = &FsLocal{}
var _ fs.DirMover = &FsLocal{}
var _ fs.Object = &FsObjectLocal{}

View File

@@ -1,56 +0,0 @@
// Test Local filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package local_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/local"
)
func init() {
fstests.NilObject = fs.Object((*local.FsObjectLocal)(nil))
fstests.RemoteName = ""
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

View File

@@ -1,52 +0,0 @@
package local
import (
"testing"
)
var uncTestPaths = []string{
"C:\\Ba*d\\P|a?t<h>\\Windows\\Folder",
"C:/Ba*d/P|a?t<h>/Windows\\Folder",
"C:\\Windows\\Folder",
"\\\\?\\C:\\Windows\\Folder",
"//?/C:/Windows/Folder",
"\\\\?\\UNC\\server\\share\\Desktop",
"\\\\?\\unC\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\AbsoluteToRoot\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop",
"\\\\?\\UNC\\\\share\\folder\\Desktop",
"\\\\server\\share",
}
var uncTestPathsResults = []string{
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\?\UNC\server\share`,
}
// Test that UNC paths are converted.
func TestUncPaths(t *testing.T) {
for i, p := range uncTestPaths {
unc := uncPath(p)
if unc != uncTestPathsResults[i] {
t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
}
// Test we don't add more.
unc = uncPath(unc)
if unc != uncTestPathsResults[i] {
t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
}
}
}

View File

@@ -1,83 +0,0 @@
#!/usr/bin/python
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
"""
import os
import re
from datetime import datetime
docpath = "docs/content"
outfile = "MANUAL.md"
# Order to add docs segments to make outfile
docs = [
"about.md",
"install.md",
"docs.md",
"filtering.md",
"overview.md",
"drive.md",
"s3.md",
"swift.md",
"dropbox.md",
"googlecloudstorage.md",
"amazonclouddrive.md",
"local.md",
"changelog.md",
"bugs.md",
"faq.md",
"licence.md",
"authors.md",
"contact.md",
]
# Docs which aren't made into outfile
ignore_docs = [
"downloads.md",
"privacy.md",
"donate.md",
]
def read_doc(doc):
"""Read file as a string"""
path = os.path.join(docpath, doc)
with open(path) as fd:
contents = fd.read()
parts = contents.split("---\n", 2)
if len(parts) != 3:
raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
contents = parts[2].strip()+"\n\n"
# Remove icons
contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
# Make [...](/links/) absolute
contents = re.sub(r'\((\/.*?\/)\)', r"(http://rclone.org\1)", contents)
return contents
def check_docs(docpath):
"""Check all the docs are in docpath"""
files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
files -= set(ignore_docs)
docs_set = set(docs)
if files == docs_set:
return
print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
raise ValueError("Missing files")
def main():
check_docs(docpath)
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s
""" % datetime.now().strftime("%b %d, %Y"))
for doc in docs:
out.write(read_doc(doc))
print "Written '%s'" % outfile
if __name__ == "__main__":
main()

View File

@@ -1,26 +1,3 @@
Perhaps make Md5sum() and Modtime() optional. Define the zero values
"" and 0. Make it so we can support remotes which can't do those.
Fix the docs
* factor the README.md into the docs directory
* create it as part of make by assembling other parts
* write long docs about each flag
Change lsd command so it doesn't show -1
* Make sure all Fses show -1 for objects, zero for dates etc
* Make test?
Put the TestRemote names into the Fs description
Make test_all.sh use the TestRemote name automatically
Run errcheck and go vet in the make file
.. Also race detector?
.. go tool vet -shadow
Get rid of Storable?
Write developer manual
Todo
* FIXME: More -dry-run checks for object transfer
* Might be quicker to check md5sums first? for swift <-> swift certainly, and maybe for small files
@@ -28,23 +5,24 @@ Todo
* if object.PseudoDirectory {
* fmt.Printf("%9s %19s %s\n", "Directory", "-", fs.Remote())
* Make Account wrapper
* limit bandwidth for a pool of all individual connections
* do timeouts by setting a limit, seeing whether io has happened
and resetting it if it has
* make Account do progress meter
* Make logging controllable with flags (mostly done)
* -timeout: Make all timeouts be settable with command line parameters
* Windows paths? Do we need to translate / and \?
* Make a fs.Errorf and count errors and log them at a different level
* Add max object size to fs metadata - 5GB for swift, infinite for local, ? for s3
* tie into -max-size flag
* FIXME Make NewFs return err.IsAnObject so we can put the LimitedFs
creation in common code? Or try for as much as possible?
* FIXME Account all the transactions (ls etc) using a different
Roundtripper wrapper which wraps the transactions?
More rsync features
* include
* exclude
* max size
* -c, --checksum skip based on checksum, not mod-time & size
Ideas for flags
* --retries N flag which would make rclone retry a sync until successful or it tried N times.
* FIXME write tests for local file system
* FIXME implement tests for single file operations in rclonetest
* Need to make directory objects otherwise can't upload an empty directory
* Or could upload empty directories only?
Ideas
* could do encryption - put IV into metadata?
@@ -57,16 +35,26 @@ Ideas
* control times sync (which is slow with some remotes) with -a --archive flag?
* Copy a glob pattern - could do with LimitedFs
Make an encryption layer.
This would layer over the source FS to
* decrypt all gets
* encrypt all puts
* encrypt file names in list
* decrypt them in list
Would like to be able to see unencrypted file names in remote though? How? Or is that two encryption layers..?
s3
* Can maybe set last modified?
* https://forums.aws.amazon.com/message.jspa?messageID=214062
* Otherwise can set metadata
* Returns etag and last modified in bucket list
Bugs
* Non verbose - not sure number transferred got counted up? CHECK
* When doing copy it recurses the whole of the destination FS which isn't necessary
Making a release
* go install -v ./...
* go test ./...
* rclonetest/test.sh
* make tag
* edit README.md
* git commit fs/version.go README.md docs/content/downloads.md
* make retag
* . ~/bin/go-cross
* make cross
* make upload
* make upload_website
* git push --tags origin master

View File

@@ -1,322 +0,0 @@
package oauthutil
import (
"crypto/rand"
"encoding/json"
"fmt"
"log"
"net"
"net/http"
"time"
"github.com/ncw/rclone/fs"
"github.com/skratchdot/open-golang/open"
"golang.org/x/net/context"
"golang.org/x/oauth2"
)
const (
// ConfigToken is the key used to store the token under
ConfigToken = "token"
// ConfigClientID is the config key used to store the client id
ConfigClientID = "client_id"
// ConfigClientSecret is the config key used to store the client secret
ConfigClientSecret = "client_secret"
// TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization
// code should be returned in the title bar of the browser, with the page text
// prompting the user to copy the code and paste it in the application.
TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob"
// bindAddress is the binding for the local webserver when active
bindAddress = "127.0.0.1:53682"
// RedirectURL is the redirect URL for the local webserver when active
RedirectURL = "http://" + bindAddress + "/"
)
// oldToken contains an end-user's tokens.
// This is the data you must store to persist authentication.
//
// From the original code.google.com/p/goauth2/oauth package - used
// for backwards compatibility in the rclone config file
type oldToken struct {
AccessToken string
RefreshToken string
Expiry time.Time
}
// getToken returns the token saved in the config file under
// section name.
func getToken(name string) (*oauth2.Token, error) {
tokenString, err := fs.ConfigFile.GetValue(string(name), ConfigToken)
if err != nil {
return nil, err
}
if tokenString == "" {
return nil, fmt.Errorf("Empty token found - please run rclone config again")
}
token := new(oauth2.Token)
err = json.Unmarshal([]byte(tokenString), token)
if err != nil {
return nil, err
}
// if has data then return it
if token.AccessToken != "" && token.RefreshToken != "" {
return token, nil
}
// otherwise try parsing as oldToken
oldtoken := new(oldToken)
err = json.Unmarshal([]byte(tokenString), oldtoken)
if err != nil {
return nil, err
}
// Fill in result into new token
token.AccessToken = oldtoken.AccessToken
token.RefreshToken = oldtoken.RefreshToken
token.Expiry = oldtoken.Expiry
// Save new format in config file
err = putToken(name, token)
if err != nil {
return nil, err
}
return token, nil
}
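// The two on-disk token shapes getToken accepts, sketched (exact JSON
// depends on the oauth2 package version in use):
//
//	new: {"access_token":"...","token_type":"Bearer","refresh_token":"...","expiry":"2015-01-01T00:00:00Z"}
//	old: {"AccessToken":"...","RefreshToken":"...","Expiry":"2015-01-01T00:00:00Z"}
//
// An old-format token is parsed once, then rewritten to the config file in
// the new format via putToken, so the migration happens transparently.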
// putToken stores the token in the config file
//
// This saves the config file if it changes
func putToken(name string, token *oauth2.Token) error {
tokenBytes, err := json.Marshal(token)
if err != nil {
return err
}
tokenString := string(tokenBytes)
old := fs.ConfigFile.MustValue(name, ConfigToken)
if tokenString != old {
fs.ConfigFile.SetValue(name, ConfigToken, tokenString)
fs.SaveConfig()
fs.Debug(name, "Saving new token in config file")
}
return nil
}
// tokenSource stores updated tokens in the config file
type tokenSource struct {
Name string
TokenSource oauth2.TokenSource
OldToken oauth2.Token
}
// Token returns a token or an error.
// Token must be safe for concurrent use by multiple goroutines.
// The returned Token must not be modified.
//
// This saves the token in the config file if it has changed
func (ts *tokenSource) Token() (*oauth2.Token, error) {
token, err := ts.TokenSource.Token()
if err != nil {
return nil, err
}
if *token != ts.OldToken {
err = putToken(ts.Name, token)
if err != nil {
return nil, err
}
}
return token, nil
}
// Check interface satisfied
var _ oauth2.TokenSource = (*tokenSource)(nil)
// Context returns a context with our HTTP Client baked in for oauth2
func Context() context.Context {
return context.WithValue(nil, oauth2.HTTPClient, fs.Config.Client())
}
// overrideCredentials sets the ClientID and ClientSecret from the
// config file if they are not blank
func overrideCredentials(name string, config *oauth2.Config) {
ClientID := fs.ConfigFile.MustValue(name, ConfigClientID)
if ClientID != "" {
config.ClientID = ClientID
}
ClientSecret := fs.ConfigFile.MustValue(name, ConfigClientSecret)
if ClientSecret != "" {
config.ClientSecret = ClientSecret
}
}
// NewClient gets a token from the config file and configures a Client
// with it
func NewClient(name string, config *oauth2.Config) (*http.Client, error) {
overrideCredentials(name, config)
token, err := getToken(name)
if err != nil {
return nil, err
}
// Set our own http client in the context
ctx := Context()
// Wrap the TokenSource in our TokenSource which saves changed
// tokens in the config file
ts := &tokenSource{
Name: name,
OldToken: *token,
TokenSource: config.TokenSource(ctx, token),
}
return oauth2.NewClient(ctx, ts), nil
}
// Config does the initial creation of the token
//
// It may run an internal webserver to receive the results
func Config(name string, config *oauth2.Config) error {
overrideCredentials(name, config)
// See if already have a token
tokenString := fs.ConfigFile.MustValue(name, "token")
if tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !fs.Confirm() {
return nil
}
}
// Detect whether we should use internal web server
useWebServer := false
switch config.RedirectURL {
case RedirectURL:
useWebServer = true
case TitleBarRedirectURL:
fmt.Printf("Use auto config?\n")
fmt.Printf(" * Say Y if not sure\n")
fmt.Printf(" * Say N if you are working on a remote or headless machine or Y didn't work\n")
useWebServer = fs.Confirm()
if useWebServer {
// copy the config and set to use the internal webserver
configCopy := *config
config = &configCopy
config.RedirectURL = RedirectURL
}
}
// Make random state
stateBytes := make([]byte, 16)
_, err := rand.Read(stateBytes)
if err != nil {
return err
}
state := fmt.Sprintf("%x", stateBytes)
authURL := config.AuthCodeURL(state)
// Prepare webserver
server := authServer{
state: state,
bindAddress: bindAddress,
authURL: authURL,
}
if useWebServer {
server.code = make(chan string, 1)
go server.Start()
defer server.Stop()
authURL = "http://" + bindAddress + "/auth"
}
// Generate a URL for the user to visit for authorization.
_ = open.Start(authURL)
fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
fmt.Printf("Log in and authorize rclone for access\n")
var authCode string
if useWebServer {
// Read the code, and exchange it for a token.
fmt.Printf("Waiting for code...\n")
authCode = <-server.code
if authCode != "" {
fmt.Printf("Got code\n")
} else {
return fmt.Errorf("Failed to get code")
}
} else {
// Read the code, and exchange it for a token.
fmt.Printf("Enter verification code> ")
authCode = fs.ReadLine()
}
token, err := config.Exchange(oauth2.NoContext, authCode)
if err != nil {
return fmt.Errorf("Failed to get token: %v", err)
}
return putToken(name, token)
}
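// A sketch of how a remote typically wires this up, assuming fs.Info carries
// a Config callback as the oauth-based backends use (exampleConfig is a
// hypothetical *oauth2.Config):
//
//	fs.Register(&fs.Info{
//		Name:  "example",
//		NewFs: NewFs,
//		Config: func(name string) {
//			if err := oauthutil.Config(name, exampleConfig); err != nil {
//				log.Fatalf("Failed to configure token: %v", err)
//			}
//		},
//	})
//
// and later NewFs calls oauthutil.NewClient(name, exampleConfig) to get an
// *http.Client that refreshes and persists the token automatically.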
// Local web server for collecting auth
type authServer struct {
state string
listener net.Listener
bindAddress string
code chan string
authURL string
}
// Start runs an internal web server to receive config details
func (s *authServer) Start() {
fs.Debug(nil, "Starting auth server on %s", s.bindAddress)
mux := http.NewServeMux()
server := &http.Server{
Addr: s.bindAddress,
Handler: mux,
}
mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, req *http.Request) {
http.Error(w, "", 404)
return
})
mux.HandleFunc("/auth", func(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, s.authURL, 307)
})
mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
fs.Debug(nil, "Received request on auth server")
code := req.FormValue("code")
if code != "" {
state := req.FormValue("state")
if state != s.state {
fs.Debug(nil, "State did not match: want %q got %q", s.state, state)
fmt.Fprintf(w, "<h1>Failure</h1>\n<p>Auth state doesn't match</p>")
} else {
fs.Debug(nil, "Successfully got code")
if s.code != nil {
fmt.Fprintf(w, "<h1>Success</h1>\n<p>Go back to rclone to continue</p>")
s.code <- code
} else {
fmt.Fprintf(w, "<h1>Success</h1>\n<p>Cut and paste this code into rclone: <code>%s</code></p>", code)
}
}
return
}
fs.Debug(nil, "No code found on request")
fmt.Fprintf(w, "<h1>Failed!</h1>\nNo code found.")
http.Error(w, "", 500)
})
var err error
s.listener, err = net.Listen("tcp", s.bindAddress)
if err != nil {
log.Fatalf("Failed to start auth webserver: %v", err)
}
err = server.Serve(s.listener)
fs.Debug(nil, "Closed auth server with error: %v", err)
}
func (s *authServer) Stop() {
fs.Debug(nil, "Closing auth server")
if s.code != nil {
close(s.code)
s.code = nil
}
_ = s.listener.Close()
}

View File

@@ -1,293 +0,0 @@
// Package pacer makes pacing and retrying API calls easy
package pacer
import (
"math/rand"
"sync"
"time"
"github.com/ncw/rclone/fs"
)
// Pacer state
type Pacer struct {
mu sync.Mutex // Protecting read/writes
minSleep time.Duration // minimum sleep time
maxSleep time.Duration // maximum sleep time
decayConstant uint // decay constant
pacer chan struct{} // To pace the operations
sleepTime time.Duration // Time to sleep for each transaction
retries int // Max number of retries
maxConnections int // Maximum number of concurrent connections
connTokens chan struct{} // Connection tokens
calculatePace func(bool) // switchable pacing algorithm - call with mu held
consecutiveRetries int // number of consecutive retries
}
// Type is for selecting different pacing algorithms
type Type int
const (
// DefaultPacer is a truncated exponential attack and decay.
//
// On retries the sleep time is doubled; on non-errors the sleep
// time decays according to the decay constant as set
// with SetDecayConstant.
//
// The sleep never goes below that set with SetMinSleep or
// above that set with SetMaxSleep.
DefaultPacer = Type(iota)
// AmazonCloudDrivePacer is a specialised pacer for Amazon Cloud Drive
//
// It implements a truncated exponential backoff strategy with
// randomization. Normally operations are paced at the
// interval set with SetMinSleep. On errors the sleep timer
// is set to 0..2**retries seconds.
//
// See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
AmazonCloudDrivePacer
)
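// Worked numbers for the Amazon pacer: on the n-th consecutive retry
// (n capped at 9) the sleep is drawn uniformly from [0, 1s<<(n-1)), so the
// windows grow 1s, 2s, 4s, ... up to 256s, with an expected sleep of half
// the window. For instance:
//
//	n := 4
//	window := time.Second << uint(n-1) // 8s; sleep = rand.Int63n(int64(window))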
// Paced is a function which is called by the Call and CallNoRetry
// methods. It should return a boolean, true if it would like to be
// retried, and an error. This error may be returned or returned
// wrapped in a RetryError.
type Paced func() (bool, error)
// New returns a Pacer with sensible defaults
func New() *Pacer {
p := &Pacer{
minSleep: 10 * time.Millisecond,
maxSleep: 2 * time.Second,
decayConstant: 2,
retries: 10,
pacer: make(chan struct{}, 1),
}
p.sleepTime = p.minSleep
p.SetPacer(DefaultPacer)
p.SetMaxConnections(fs.Config.Checkers + fs.Config.Transfers)
// Put the first pacing token in
p.pacer <- struct{}{}
return p
}
// SetMinSleep sets the minimum sleep time for the pacer
func (p *Pacer) SetMinSleep(t time.Duration) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.minSleep = t
p.sleepTime = p.minSleep
return p
}
// SetMaxSleep sets the maximum sleep time for the pacer
func (p *Pacer) SetMaxSleep(t time.Duration) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.maxSleep = t
p.sleepTime = p.minSleep
return p
}
// SetMaxConnections sets the maximum number of concurrent connections.
// Setting the value to 0 will allow unlimited number of connections.
// Should not be changed once you have started calling the pacer.
// By default this will be set to fs.Config.Checkers.
func (p *Pacer) SetMaxConnections(n int) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.maxConnections = n
if n <= 0 {
p.connTokens = nil
} else {
p.connTokens = make(chan struct{}, n)
for i := 0; i < n; i++ {
p.connTokens <- struct{}{}
}
}
return p
}
// SetDecayConstant sets the decay constant for the pacer
//
// This is the speed the time falls back to the minimum after errors
// have occurred.
//
// bigger for slower decay, exponential
func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.decayConstant = decay
return p
}
// SetRetries sets the max number of tries for Call
func (p *Pacer) SetRetries(retries int) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.retries = retries
return p
}
// SetPacer sets the pacing algorithm
//
// It will choose the default algorithm if an incorrect value is
// passed in.
func (p *Pacer) SetPacer(t Type) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
switch t {
case AmazonCloudDrivePacer:
p.calculatePace = p.acdPacer
default:
p.calculatePace = p.defaultPacer
}
return p
}
// Start a call to the API
//
// This must be called as a pair with endCall
//
// This waits for the pacer token
func (p *Pacer) beginCall() {
// pacer starts with a token in and whenever we take one out
// XXX ms later we put another in. We could do this with a
// Ticker more accurately, but then we'd have to work out how
// not to run it when it wasn't needed
<-p.pacer
if p.maxConnections > 0 {
<-p.connTokens
}
p.mu.Lock()
// Restart the timer
go func(t time.Duration) {
// fs.Debug(f, "New sleep for %v at %v", t, time.Now())
time.Sleep(t)
p.pacer <- struct{}{}
}(p.sleepTime)
p.mu.Unlock()
}
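// Timeline sketch for the token scheme above, with sleepTime = 100ms and
// two back-to-back calls:
//
//	t=0      call 1 takes the pacer token, schedules a replacement at t=100ms
//	t=0+     call 2 blocks on <-p.pacer
//	t=100ms  replacement token arrives, call 2 proceeds
//
// so calls begin at most once per sleepTime, however long each one runs.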
// defaultPacer implements an exponential up
// and down pacing algorithm
//
// See the description for DefaultPacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) defaultPacer(retry bool) {
oldSleepTime := p.sleepTime
if retry {
p.sleepTime *= 2
if p.sleepTime > p.maxSleep {
p.sleepTime = p.maxSleep
}
if p.sleepTime != oldSleepTime {
fs.Debug("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
}
} else {
p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
if p.sleepTime < p.minSleep {
p.sleepTime = p.minSleep
}
if p.sleepTime != oldSleepTime {
fs.Debug("pacer", "Reducing sleep to %v", p.sleepTime)
}
}
}
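// The shift expression above computes sleepTime * (2^d - 1) / 2^d without
// floating point, where d is the decay constant. With the default d = 2 a
// successful call shrinks the sleep to 3/4 of its previous value:
//
//	sleep := time.Second
//	d := uint(2)
//	sleep = (sleep<<d - sleep) >> d // 750ms, matching the table in the tests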
// acdPacer implements a truncated exponential backoff
// strategy with randomization for Amazon Cloud Drive
//
// See the description for AmazonCloudDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) acdPacer(retry bool) {
consecutiveRetries := p.consecutiveRetries
if consecutiveRetries == 0 {
if p.sleepTime != p.minSleep {
p.sleepTime = p.minSleep
fs.Debug("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
}
} else {
if consecutiveRetries > 9 {
consecutiveRetries = 9
}
// consecutiveRetries starts at 1 so
// maxSleep is 2**(consecutiveRetries-1) seconds
maxSleep := time.Second << uint(consecutiveRetries-1)
// actual sleep is random from 0..maxSleep
p.sleepTime = time.Duration(rand.Int63n(int64(maxSleep)))
if p.sleepTime < p.minSleep {
p.sleepTime = p.minSleep
}
fs.Debug("pacer", "Rate limited, sleeping for %v (%d retries)", p.sleepTime, consecutiveRetries)
}
}
// endCall implements the pacing algorithm
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
func (p *Pacer) endCall(retry bool) {
if p.maxConnections > 0 {
p.connTokens <- struct{}{}
}
p.mu.Lock()
if retry {
p.consecutiveRetries++
} else {
p.consecutiveRetries = 0
}
p.calculatePace(retry)
p.mu.Unlock()
}
// call implements Call but with settable retries
func (p *Pacer) call(fn Paced, retries int) (err error) {
var retry bool
for i := 0; i < retries; i++ {
p.beginCall()
retry, err = fn()
p.endCall(retry)
if !retry {
break
}
}
if retry {
err = fs.RetryError(err)
}
return err
}
// Call paces the remote operations to not exceed the limits and retry
// on rate limit exceeded
//
// This calls fn, expecting it to return a retry flag and an
// error. This error may be returned wrapped in a RetryError if the
// number of retries is exceeded.
func (p *Pacer) Call(fn Paced) (err error) {
p.mu.Lock()
retries := p.retries
p.mu.Unlock()
return p.call(fn, retries)
}
// CallNoRetry paces the remote operations to not exceed the limits
// and return a retry error on rate limit exceeded
//
// This calls fn and wraps the output in a RetryError if it would like
// it to be retried
func (p *Pacer) CallNoRetry(fn Paced) error {
return p.call(fn, 1)
}
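// A sketch of typical use from a remote's point of view (doRequest is a
// hypothetical API call that reports whether it was rate limited):
//
//	p := pacer.New().SetMinSleep(10 * time.Millisecond)
//	err := p.Call(func() (bool, error) {
//		resp, err := doRequest()
//		retry := resp != nil && resp.StatusCode == 429
//		return retry, err
//	})
//
// Call retries up to the SetRetries limit and wraps the final error with
// fs.RetryError if the last attempt still asked to be retried.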

View File

@@ -1,324 +0,0 @@
package pacer
import (
"fmt"
"testing"
"time"
"github.com/ncw/rclone/fs"
)
func TestNew(t *testing.T) {
p := New()
if p.minSleep != 10*time.Millisecond {
t.Errorf("minSleep")
}
if p.maxSleep != 2*time.Second {
t.Errorf("maxSleep")
}
if p.sleepTime != p.minSleep {
t.Errorf("sleepTime")
}
if p.retries != 10 {
t.Errorf("retries")
}
if p.decayConstant != 2 {
t.Errorf("decayConstant")
}
if cap(p.pacer) != 1 {
t.Errorf("pacer 1")
}
if len(p.pacer) != 1 {
t.Errorf("pacer 2")
}
if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.defaultPacer) {
t.Errorf("calculatePace")
}
if p.maxConnections != fs.Config.Checkers+fs.Config.Transfers {
t.Errorf("maxConnections")
}
if cap(p.connTokens) != fs.Config.Checkers+fs.Config.Transfers {
t.Errorf("connTokens")
}
if p.consecutiveRetries != 0 {
t.Errorf("consecutiveRetries")
}
}
func TestSetMinSleep(t *testing.T) {
p := New().SetMinSleep(1 * time.Millisecond)
if p.minSleep != 1*time.Millisecond {
t.Errorf("didn't set")
}
}
func TestSetMaxSleep(t *testing.T) {
p := New().SetMaxSleep(100 * time.Second)
if p.maxSleep != 100*time.Second {
t.Errorf("didn't set")
}
}
func TestMaxConnections(t *testing.T) {
p := New().SetMaxConnections(20)
if p.maxConnections != 20 {
t.Errorf("maxConnections")
}
if cap(p.connTokens) != 20 {
t.Errorf("connTokens")
}
p.SetMaxConnections(0)
if p.maxConnections != 0 {
t.Errorf("maxConnections is not 0")
}
if p.connTokens != nil {
t.Errorf("connTokens is not nil")
}
}
func TestSetDecayConstant(t *testing.T) {
p := New().SetDecayConstant(17)
if p.decayConstant != 17 {
t.Errorf("didn't set")
}
}
func TestSetRetries(t *testing.T) {
p := New().SetRetries(18)
if p.retries != 18 {
t.Errorf("didn't set")
}
}
func TestSetPacer(t *testing.T) {
p := New().SetPacer(AmazonCloudDrivePacer)
if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.acdPacer) {
t.Errorf("calculatePace is not acdPacer")
}
p.SetPacer(DefaultPacer)
if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.defaultPacer) {
t.Errorf("calculatePace is not defaultPacer")
}
}
// emptyTokens empties the pacer of all its tokens
func emptyTokens(p *Pacer) {
for len(p.pacer) != 0 {
<-p.pacer
}
for len(p.connTokens) != 0 {
<-p.connTokens
}
}
// waitForPace waits for duration for the pace to arrive
// returns the time that it arrived or a zero time
func waitForPace(p *Pacer, duration time.Duration) (when time.Time) {
select {
case <-time.After(duration):
return
case <-p.pacer:
return time.Now()
}
}
func TestBeginCall(t *testing.T) {
p := New().SetMaxConnections(10).SetMinSleep(1 * time.Millisecond)
emptyTokens(p)
go p.beginCall()
if !waitForPace(p, 10*time.Millisecond).IsZero() {
t.Errorf("beginSleep fired too early #1")
}
startTime := time.Now()
p.pacer <- struct{}{}
time.Sleep(1 * time.Millisecond)
connTime := time.Now()
p.connTokens <- struct{}{}
time.Sleep(1 * time.Millisecond)
paceTime := waitForPace(p, 10*time.Millisecond)
if paceTime.IsZero() {
t.Errorf("beginSleep didn't fire")
} else if paceTime.Sub(startTime) < 0 {
t.Errorf("pace arrived before returning pace token")
} else if paceTime.Sub(connTime) < 0 {
t.Errorf("pace arrived before sending conn token")
}
}
func TestBeginCallZeroConnections(t *testing.T) {
p := New().SetMaxConnections(0).SetMinSleep(1 * time.Millisecond)
emptyTokens(p)
go p.beginCall()
if !waitForPace(p, 10*time.Millisecond).IsZero() {
t.Errorf("beginSleep fired too early #1")
}
startTime := time.Now()
p.pacer <- struct{}{}
time.Sleep(1 * time.Millisecond)
paceTime := waitForPace(p, 10*time.Millisecond)
if paceTime.IsZero() {
t.Errorf("beginSleep didn't fire")
} else if paceTime.Sub(startTime) < 0 {
t.Errorf("pace arrived before returning pace token")
}
}
func TestDefaultPacer(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second).SetDecayConstant(2)
for _, test := range []struct {
in time.Duration
retry bool
want time.Duration
}{
{time.Millisecond, true, 2 * time.Millisecond},
{time.Second, true, time.Second},
{(3 * time.Second) / 4, true, time.Second},
{time.Second, false, 750 * time.Millisecond},
{1000 * time.Microsecond, false, time.Millisecond},
{1200 * time.Microsecond, false, time.Millisecond},
} {
p.sleepTime = test.in
p.defaultPacer(test.retry)
got := p.sleepTime
if got != test.want {
t.Errorf("bad sleep want %v got %v", test.want, got)
}
}
}
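
Read as a table, TestDefaultPacer pins the default rule down: a retry doubles the sleep and clamps it to maxSleep, while a success shrinks it by sleep/2^decayConstant and clamps it to minSleep. A minimal sketch of that rule, written here for illustration rather than copied from the implementation:

package main

import "time"

// defaultPaceSketch reproduces the arithmetic implied by the test table
// above. The name and signature are illustrative only.
func defaultPaceSketch(sleep, minSleep, maxSleep time.Duration, decayConstant uint, retry bool) time.Duration {
	if retry {
		sleep *= 2
		if sleep > maxSleep {
			sleep = maxSleep
		}
		return sleep
	}
	sleep -= sleep >> decayConstant
	if sleep < minSleep {
		sleep = minSleep
	}
	return sleep
}

Checking it against the cases: with decayConstant=2, a successful 1s call decays to 750ms, and 1200µs decays to 900µs, which then clamps up to the 1ms floor.
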
func TestAmazonCloudDrivePacer(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetPacer(AmazonCloudDrivePacer).SetMaxSleep(time.Second).SetDecayConstant(2)
// Do lots of times because of the random number!
for _, test := range []struct {
in time.Duration
consecutiveRetries int
retry bool
want time.Duration
}{
{time.Millisecond, 0, true, time.Millisecond},
{10 * time.Millisecond, 0, true, time.Millisecond},
{1 * time.Second, 1, true, 500 * time.Millisecond},
{1 * time.Second, 2, true, 1 * time.Second},
{1 * time.Second, 3, true, 2 * time.Second},
{1 * time.Second, 4, true, 4 * time.Second},
{1 * time.Second, 5, true, 8 * time.Second},
{1 * time.Second, 6, true, 16 * time.Second},
{1 * time.Second, 7, true, 32 * time.Second},
{1 * time.Second, 8, true, 64 * time.Second},
{1 * time.Second, 9, true, 128 * time.Second},
{1 * time.Second, 10, true, 128 * time.Second},
{1 * time.Second, 11, true, 128 * time.Second},
} {
const n = 1000
var sum time.Duration
// measure average time over n cycles
for i := 0; i < n; i++ {
p.sleepTime = test.in
p.consecutiveRetries = test.consecutiveRetries
p.acdPacer(test.retry)
sum += p.sleepTime
}
got := sum / n
//t.Logf("%+v: got = %v", test, got)
if got < (test.want*9)/10 || got > (test.want*11)/10 {
t.Fatalf("%+v: bad sleep want %v+/-10%% got %v", test, test.want, got)
}
}
}
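
The averaged expectations also make the Amazon Cloud Drive strategy legible: the first failure resets the sleep to minSleep, and each consecutive retry then draws a uniformly random sleep below 2^(consecutiveRetries-1) seconds, with the bound capped at 256s, so the averages land on 2^(consecutiveRetries-2) seconds up to the 128s ceiling. A hypothetical sketch of that reading, not the real acdPacer:

package main

import (
	"math/rand"
	"time"
)

// acdPaceSketch mirrors the behaviour the averaged expectations above
// imply; it is an illustration, not the tested code.
func acdPaceSketch(minSleep time.Duration, consecutiveRetries int) time.Duration {
	if consecutiveRetries == 0 {
		return minSleep
	}
	upper := time.Second << uint(consecutiveRetries-1)
	if upper > 256*time.Second {
		upper = 256 * time.Second
	}
	return time.Duration(rand.Int63n(int64(upper)))
}
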
func TestEndCall(t *testing.T) {
p := New().SetMaxConnections(5)
emptyTokens(p)
p.consecutiveRetries = 1
p.endCall(true)
if len(p.connTokens) != 1 {
t.Errorf("Expecting 1 token")
}
if p.consecutiveRetries != 2 {
t.Errorf("Bad consecutive retries")
}
}
func TestEndCallZeroConnections(t *testing.T) {
p := New().SetMaxConnections(0)
emptyTokens(p)
p.consecutiveRetries = 1
p.endCall(false)
if len(p.connTokens) != 0 {
t.Errorf("Expecting 0 token")
}
if p.consecutiveRetries != 0 {
t.Errorf("Bad consecutive retries")
}
}
var errFoo = fmt.Errorf("Foo")
type dummyPaced struct {
retry bool
called int
}
func (dp *dummyPaced) fn() (bool, error) {
dp.called++
return dp.retry, errFoo
}
func Test_callNoRetry(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond)
dp := &dummyPaced{retry: false}
err := p.call(dp.fn, 10)
if dp.called != 1 {
t.Errorf("called want %d got %d", 1, dp.called)
}
if err != errFoo {
t.Errorf("err want %v got %v", errFoo, err)
}
}
func Test_callRetry(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond)
dp := &dummyPaced{retry: true}
err := p.call(dp.fn, 10)
if dp.called != 10 {
t.Errorf("called want %d got %d", 10, dp.called)
}
if err == errFoo {
t.Errorf("err didn't want %v got %v", errFoo, err)
}
_, ok := err.(fs.Retry)
if !ok {
t.Errorf("didn't return a retry error")
}
}
func TestCall(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond).SetRetries(20)
dp := &dummyPaced{retry: true}
err := p.Call(dp.fn)
if dp.called != 20 {
t.Errorf("called want %d got %d", 20, dp.called)
}
_, ok := err.(fs.Retry)
if !ok {
t.Errorf("didn't return a retry error")
}
}
func TestCallNoRetry(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond).SetRetries(20)
dp := &dummyPaced{retry: true}
err := p.CallNoRetry(dp.fn)
if dp.called != 1 {
t.Errorf("called want %d got %d", 1, dp.called)
}
_, ok := err.(fs.Retry)
if !ok {
t.Errorf("didn't return a retry error")
}
}
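
End to end, these tests fix the pacer's public surface: construct with New, chain the Set* options, and wrap each remote call in Call (or CallNoRetry) with a function returning (retry bool, err error). A usage sketch under those assumptions; the import path is assumed from the package name and the paced operation is a placeholder:

package main

import (
	"fmt"
	"time"

	"github.com/ncw/rclone/pacer" // assumed path for the package under test
)

func main() {
	p := pacer.New().
		SetMinSleep(10 * time.Millisecond).
		SetMaxSleep(2 * time.Second).
		SetRetries(10).
		SetPacer(pacer.AmazonCloudDrivePacer)

	// Call re-invokes the function while it returns (true, err), pacing
	// each attempt; a (false, err) result is passed straight through.
	err := p.Call(func() (bool, error) {
		// A real paced function would perform the remote operation here
		// and report whether its error is worth retrying.
		return false, nil
	})
	fmt.Println("result:", err)
}
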

239
rclone.go

@@ -12,11 +12,10 @@ import (
"strings"
"time"
"github.com/spf13/pflag"
"github.com/ogier/pflag"
"github.com/ncw/rclone/fs"
// Active file systems
_ "github.com/ncw/rclone/amazonclouddrive"
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
@@ -29,22 +28,18 @@ import (
var (
// Flags
cpuprofile = pflag.StringP("cpuprofile", "", "", "Write cpu profile to file")
statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats (0 to disable)")
statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats")
version = pflag.BoolP("version", "V", false, "Print the version number")
logFile = pflag.StringP("log-file", "", "", "Log everything to this file")
retries = pflag.IntP("retries", "", 3, "Retry operations this many times if they fail")
)
// Command holds info about the current running command
type Command struct {
Name string
Help string
ArgsHelp string
Run func(fdst, fsrc fs.Fs) error
Run func(fdst, fsrc fs.Fs)
MinArgs int
MaxArgs int
NoStats bool
Retry bool
}
// checkArgs checks there are enough arguments and prints a message if not
@@ -60,163 +55,152 @@ func (cmd *Command) checkArgs(args []string) {
}
}
// Commands is a slice of possible Command~s
var Commands = []Command{
{
Name: "copy",
ArgsHelp: "source:path dest:path",
ArgsHelp: "source://path dest://path",
Help: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.CopyDir(fdst, fsrc)
Run: func(fdst, fsrc fs.Fs) {
err := fs.Sync(fdst, fsrc, false)
if err != nil {
log.Fatalf("Failed to copy: %v", err)
}
},
MinArgs: 2,
MaxArgs: 2,
Retry: true,
},
{
Name: "sync",
ArgsHelp: "source:path dest:path",
ArgsHelp: "source://path dest://path",
Help: `
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the --dry-run flag.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.Sync(fdst, fsrc)
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the --dry-run flag.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Sync(fdst, fsrc, true)
if err != nil {
log.Fatalf("Failed to sync: %v", err)
}
},
MinArgs: 2,
MaxArgs: 2,
Retry: true,
},
{
Name: "move",
ArgsHelp: "source:path dest:path",
Help: `
Moves the source to the destination. This is equivalent to a
copy followed by a purge, but may use server side operations
to speed it up. Since this can cause data loss, test first
with the --dry-run flag.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.MoveDir(fdst, fsrc)
},
MinArgs: 2,
MaxArgs: 2,
Retry: true,
},
{
Name: "ls",
ArgsHelp: "remote:path",
ArgsHelp: "[remote://path]",
Help: `
List all the objects in the path with size and path.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.List(fdst, os.Stdout)
Run: func(fdst, fsrc fs.Fs) {
err := fs.List(fdst)
if err != nil {
log.Fatalf("Failed to list: %v", err)
}
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "lsd",
ArgsHelp: "remote:path",
ArgsHelp: "[remote://path]",
Help: `
List all directories/containers/buckets in the path.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.ListDir(fdst, os.Stdout)
Run: func(fdst, fsrc fs.Fs) {
err := fs.ListDir(fdst)
if err != nil {
log.Fatalf("Failed to listdir: %v", err)
}
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "lsl",
ArgsHelp: "remote:path",
ArgsHelp: "[remote://path]",
Help: `
List all the objects in the path with modification time,
size and path.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.ListLong(fdst, os.Stdout)
List all the objects in the path with modification time, size and path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.ListLong(fdst)
if err != nil {
log.Fatalf("Failed to list long: %v", err)
}
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "md5sum",
ArgsHelp: "remote:path",
ArgsHelp: "[remote://path]",
Help: `
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.Md5sum(fdst, os.Stdout)
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "size",
ArgsHelp: "remote:path",
Help: `
Returns the total size of objects in remote:path and the number
of objects.`,
Run: func(fdst, fsrc fs.Fs) error {
objects, size, err := fs.Count(fdst)
Produces an md5sum file for all the objects in the path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Md5sum(fdst)
if err != nil {
return err
log.Fatalf("Failed to list: %v", err)
}
fmt.Printf("Total objects: %d\n", objects)
fmt.Printf("Total size: %v (%d bytes)\n", fs.SizeSuffix(size), size)
return nil
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "mkdir",
ArgsHelp: "remote:path",
ArgsHelp: "remote://path",
Help: `
Make the path if it doesn't already exist`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.Mkdir(fdst)
Run: func(fdst, fsrc fs.Fs) {
err := fs.Mkdir(fdst)
if err != nil {
log.Fatalf("Failed to mkdir: %v", err)
}
},
MinArgs: 1,
MaxArgs: 1,
Retry: true,
},
{
Name: "rmdir",
ArgsHelp: "remote:path",
ArgsHelp: "remote://path",
Help: `
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.Rmdir(fdst)
Run: func(fdst, fsrc fs.Fs) {
err := fs.Rmdir(fdst)
if err != nil {
log.Fatalf("Failed to rmdir: %v", err)
}
},
MinArgs: 1,
MaxArgs: 1,
Retry: true,
},
{
Name: "purge",
ArgsHelp: "remote:path",
ArgsHelp: "remote://path",
Help: `
Remove the path and all of its contents.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.Purge(fdst)
Run: func(fdst, fsrc fs.Fs) {
err := fs.Purge(fdst)
if err != nil {
log.Fatalf("Failed to purge: %v", err)
}
},
MinArgs: 1,
MaxArgs: 1,
Retry: true,
},
{
Name: "check",
ArgsHelp: "source:path dest:path",
ArgsHelp: "source://path dest://path",
Help: `
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.Check(fdst, fsrc)
Run: func(fdst, fsrc fs.Fs) {
err := fs.Check(fdst, fsrc)
if err != nil {
log.Fatalf("Failed to check: %v", err)
}
},
MinArgs: 2,
MaxArgs: 2,
@@ -225,9 +209,8 @@ var Commands = []Command{
Name: "config",
Help: `
Enter an interactive configuration session.`,
Run: func(fdst, fsrc fs.Fs) error {
Run: func(fdst, fsrc fs.Fs) {
fs.EditConfig()
return nil
},
NoStats: true,
},
@@ -257,8 +240,7 @@ Subcommands:
fmt.Fprintf(os.Stderr, "Options:\n")
pflag.PrintDefaults()
fmt.Fprintf(os.Stderr, `
It is only necessary to use a unique prefix of the subcommand, eg 'mo'
for 'move'.
It is only necessary to use a unique prefix of the subcommand, eg 'up' for 'upload'.
`)
}
@@ -269,7 +251,7 @@ func fatal(message string, args ...interface{}) {
os.Exit(1)
}
// ParseFlags parses the command line flags
// Parse the command line flags
func ParseFlags() {
pflag.Usage = syntaxError
pflag.Parse()
@@ -283,16 +265,12 @@ func ParseFlags() {
fs.Stats.Error()
log.Fatal(err)
}
err = pprof.StartCPUProfile(f)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
}
// ParseCommand parses the command from the command line
// Parse the command from the command line
func ParseCommand() (*Command, []string) {
args := pflag.Args()
if len(args) < 1 {
@@ -303,7 +281,6 @@ func ParseCommand() (*Command, []string) {
args = args[1:]
// Find the command doing a prefix match
var found = make([]*Command, 0, 1)
var command *Command
for i := range Commands {
trialCommand := &Commands[i]
@@ -312,24 +289,16 @@ func ParseCommand() (*Command, []string) {
command = trialCommand
break
} else if strings.HasPrefix(trialCommand.Name, cmd) {
found = append(found, trialCommand)
if command != nil {
fs.Stats.Error()
log.Fatalf("Not unique - matches multiple commands %q", cmd)
}
command = trialCommand
}
}
if command == nil {
switch len(found) {
case 0:
fs.Stats.Error()
log.Fatalf("Unknown command %q", cmd)
case 1:
command = found[0]
default:
fs.Stats.Error()
var names []string
for _, cmd := range found {
names = append(names, `"`+cmd.Name+`"`)
}
log.Fatalf("Not unique - matches multiple commands: %s", strings.Join(names, ", "))
}
fs.Stats.Error()
log.Fatalf("Unknown command %q", cmd)
}
if command.Run == nil {
syntaxError()
@@ -338,7 +307,7 @@ func ParseCommand() (*Command, []string) {
return command, args
}
// NewFs creates a Fs from a name
// Create a Fs from a name
func NewFs(remote string) fs.Fs {
f, err := fs.NewFs(remote)
if err != nil {
@@ -348,11 +317,8 @@ func NewFs(remote string) fs.Fs {
return f
}
// StartStats prints the stats every statsInterval
// Print the stats every statsInterval
func StartStats() {
if *statsInterval <= 0 {
return
}
go func() {
ch := time.Tick(*statsInterval)
for {
@@ -370,20 +336,6 @@ func main() {
}
command, args := ParseCommand()
// Log file output
if *logFile != "" {
f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
if err != nil {
log.Fatalf("Failed to open log file: %v", err)
}
_, err = f.Seek(0, os.SEEK_END)
if err != nil {
log.Printf("Failed to seek log file to end: %v", err)
}
log.SetOutput(f)
redirectStderr(f)
}
// Make source and destination fs
var fdst, fsrc fs.Fs
if len(args) >= 1 {
@@ -402,29 +354,12 @@ func main() {
// Run the actual command
if command.Run != nil {
var err error
for try := 1; try <= *retries; try++ {
err = command.Run(fdst, fsrc)
if !command.Retry || (err == nil && !fs.Stats.Errored()) {
break
}
if err != nil {
fs.Log(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
} else {
fs.Log(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
}
if try < *retries {
fs.Stats.ResetErrors()
}
}
if err != nil {
log.Fatalf("Failed to %s: %v", command.Name, err)
}
if !command.NoStats && (!fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0) {
fmt.Fprintln(os.Stderr, fs.Stats)
command.Run(fdst, fsrc)
if !command.NoStats {
fmt.Println(fs.Stats)
}
if fs.Config.Verbose {
fs.Debug(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
log.Printf("*** Go routines at exit %d\n", runtime.NumGoroutine())
}
if fs.Stats.Errored() {
os.Exit(1)
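
The final hunk above is the newer side's whole retry story: commands whose Retry flag is set re-run up to --retries times, logging and resetting the error count between attempts, and the exit code reflects any remaining errors. Distilled to a standalone sketch (the stats bookkeeping is dropped and the names are illustrative):

package main

import "log"

// runWithRetries re-runs fn until it succeeds or the attempts run out,
// echoing the loop in the diff above.
func runWithRetries(fn func() error, retries int) error {
	var err error
	for try := 1; try <= retries; try++ {
		err = fn()
		if err == nil {
			return nil
		}
		log.Printf("Attempt %d/%d failed with: %v", try, retries, err)
	}
	return err
}
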

419
rclonetest/rclonetest.go Normal file

@@ -0,0 +1,419 @@
// Test rclone by doing real transactions to a storage provider to and
// from the local disk
package main
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ogier/pflag"
// Active file systems
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
)
// Globals
var (
localName, remoteName string
version = pflag.BoolP("version", "V", false, "Print the version number")
subDir = pflag.BoolP("subdir", "S", false, "Test with a sub directory")
)
// Represents an item for checking
type Item struct {
Path string
Md5sum string
ModTime time.Time
Size int64
}
// Represents all items for checking
type Items struct {
byName map[string]*Item
items []Item
}
// Make an Items
func NewItems(items []Item) *Items {
is := &Items{
byName: make(map[string]*Item),
items: items,
}
// Fill up byName
for i := range items {
is.byName[items[i].Path] = &items[i]
}
return is
}
// Check off an item
func (is *Items) Find(obj fs.Object) {
i, ok := is.byName[obj.Remote()]
if !ok {
log.Fatalf("Unexpected file %q", obj.Remote())
}
delete(is.byName, obj.Remote())
// Check attributes
Md5sum, err := obj.Md5sum()
if err != nil {
log.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
}
if i.Md5sum != Md5sum {
log.Fatalf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
}
if i.Size != obj.Size() {
log.Fatalf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
}
// check the mod time to the given precision
modTime := obj.ModTime()
dt := modTime.Sub(i.ModTime)
if dt >= fs.Config.ModifyWindow || dt <= -fs.Config.ModifyWindow {
log.Fatalf("%s: Modification time difference too big |%s| > %s (%s vs %s)", obj.Remote(), dt, fs.Config.ModifyWindow, modTime, i.ModTime)
}
}
// Check all done
func (is *Items) Done() {
if len(is.byName) != 0 {
for name := range is.byName {
log.Printf("Not found %q", name)
}
log.Fatalf("%d objects not found", len(is.byName))
}
}
// Checks the fs to see if it has the expected contents
func CheckListing(f fs.Fs, items []Item) {
is := NewItems(items)
for obj := range f.List() {
is.Find(obj)
}
is.Done()
}
// Parse a time string or explode
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// Write a file
func WriteFile(filePath, content string, t time.Time) {
// FIXME make directories?
filePath = path.Join(localName, filePath)
dirPath := path.Dir(filePath)
err := os.MkdirAll(dirPath, 0770)
if err != nil {
log.Fatalf("Failed to make directories %q: %v", dirPath, err)
}
err = ioutil.WriteFile(filePath, []byte(content), 0600)
if err != nil {
log.Fatalf("Failed to write file %q: %v", filePath, err)
}
err = os.Chtimes(filePath, t, t)
if err != nil {
log.Fatalf("Failed to chtimes file %q: %v", filePath, err)
}
}
// Create a random string
func RandomString(n int) string {
source := "abcdefghijklmnopqrstuvwxyz0123456789"
out := make([]byte, n)
for i := range out {
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
func TestMkdir(flocal, fremote fs.Fs) {
err := fs.Mkdir(fremote)
if err != nil {
log.Fatalf("Mkdir failed: %v", err)
}
items := []Item{}
CheckListing(flocal, items)
CheckListing(fremote, items)
}
var t1 = Time("2001-02-03T04:05:06.499999999Z")
var t2 = Time("2011-12-25T12:59:59.123456789Z")
var t3 = Time("2011-12-30T12:59:59.000000000Z")
func TestCopy(flocal, fremote fs.Fs) {
WriteFile("sub dir/hello world", "hello world", t1)
// Check dry run is working
log.Printf("Copy with --dry-run")
fs.Config.DryRun = true
err := fs.Sync(fremote, flocal, false)
fs.Config.DryRun = false
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
items := []Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
CheckListing(flocal, items)
CheckListing(fremote, []Item{})
// Now without dry run
log.Printf("Copy")
err = fs.Sync(fremote, flocal, false)
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// Now delete the local file and download it
err = os.Remove(localName + "/sub dir/hello world")
if err != nil {
log.Fatalf("Remove failed: %v", err)
}
CheckListing(flocal, []Item{})
CheckListing(fremote, items)
log.Printf("Copy - redownload")
err = fs.Sync(flocal, fremote, false)
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// Clean the directory
cleanTempDir()
}
func TestSync(flocal, fremote fs.Fs) {
WriteFile("empty space", "", t1)
log.Printf("Sync after changing file modtime only")
err := os.Chtimes(localName+"/empty space", t2, t2)
if err != nil {
log.Fatalf("Chtimes failed: %v", err)
}
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items := []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after adding a file")
WriteFile("potato", "------------------------------------------------------------", t3)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after changing a file's size only")
WriteFile("potato", "smaller but same date", t3)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after removing a file and adding a file --dry-run")
WriteFile("potato2", "------------------------------------------------------------", t1)
err = os.Remove(localName + "/potato")
if err != nil {
log.Fatalf("Remove failed: %v", err)
}
fs.Config.DryRun = true
err = fs.Sync(fremote, flocal, true)
fs.Config.DryRun = false
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
before := []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
CheckListing(flocal, items)
CheckListing(fremote, before)
log.Printf("Sync after removing a file and adding a file")
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
CheckListing(flocal, items)
CheckListing(fremote, items)
}
func TestLs(flocal, fremote fs.Fs) {
// Underlying List has been tested above, so we just make sure it runs
err := fs.List(fremote)
if err != nil {
log.Fatalf("List failed: %v", err)
}
}
func TestLsd(flocal, fremote fs.Fs) {
}
func TestCheck(flocal, fremote fs.Fs) {
}
func TestPurge(fremote fs.Fs) {
err := fs.Purge(fremote)
if err != nil {
log.Fatalf("Purge failed: %v", err)
}
unexpected := 0
for obj := range fremote.List() {
unexpected++
log.Printf("Found unexpected item %s", obj.Remote())
}
if unexpected != 0 {
log.Fatalf("exiting as found %d unexpected items", unexpected)
}
}
func TestRmdir(flocal, fremote fs.Fs) {
err := fs.Rmdir(fremote)
if err != nil {
log.Fatalf("Rmdir failed: %v", err)
}
}
func syntaxError() {
fmt.Fprintf(os.Stderr, `Test rclone with a remote to find bugs in either - %s.
Syntax: [options] remote:
Need a remote: as argument. This will create a random container or
directory under it and perform tests on it, deleting it at the end.
Options:
`, fs.Version)
pflag.PrintDefaults()
}
// Clean the temporary directory
func cleanTempDir() {
log.Printf("Cleaning temporary directory: %q", localName)
err := os.RemoveAll(localName)
if err != nil {
log.Printf("Failed to remove %q: %v", localName, err)
}
}
func main() {
pflag.Usage = syntaxError
pflag.Parse()
if *version {
fmt.Printf("rclonetest %s\n", fs.Version)
os.Exit(0)
}
fs.LoadConfig()
rand.Seed(time.Now().UnixNano())
args := pflag.Args()
if len(args) != 1 {
syntaxError()
os.Exit(1)
}
remoteName = args[0]
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
remoteName += RandomString(32)
var parentRemote fs.Fs
if *subDir {
var err error
parentRemote, err = fs.NewFs(remoteName)
if err != nil {
log.Fatalf("Failed to make parent %q: %v", remoteName, err)
}
remoteName += "/" + RandomString(8)
}
log.Printf("Testing with remote %q", remoteName)
var err error
localName, err = ioutil.TempDir("", "rclone")
if err != nil {
log.Fatalf("Failed to create temp dir: %v", err)
}
log.Printf("Testing with local %q", localName)
fremote, err := fs.NewFs(remoteName)
if err != nil {
log.Fatalf("Failed to make %q: %v", remoteName, err)
}
flocal, err := fs.NewFs(localName)
if err != nil {
log.Fatalf("Failed to make %q: %v", remoteName, err)
}
fs.CalculateModifyWindow(fremote, flocal)
TestMkdir(flocal, fremote)
TestCopy(flocal, fremote)
TestSync(flocal, fremote)
TestLs(flocal, fremote)
TestLsd(flocal, fremote)
TestCheck(flocal, fremote)
TestPurge(fremote)
//TestRmdir(flocal, fremote)
if parentRemote != nil {
TestPurge(parentRemote)
}
cleanTempDir()
log.Printf("Tests OK")
}
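
A detail worth pulling out of Items.Find above: modification times are never compared exactly, only to within fs.Config.ModifyWindow, since remotes round timestamps to different precisions. The check reduces to this sketch:

package main

import "time"

// within reports whether two timestamps agree to the given precision,
// the same |dt| < window test Items.Find applies.
func within(a, b time.Time, window time.Duration) bool {
	dt := a.Sub(b)
	return dt > -window && dt < window
}
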

25
rclonetest/test.sh Executable file

@@ -0,0 +1,25 @@
#!/bin/bash
go install
REMOTES="
memstore:
s3:
drive2:
gcs:
dropbox:
/tmp/z
"
function test_remote {
args=$@
rclonetest $args || {
echo "*** rclonetest $args FAILED ***"
exit 1
}
}
for remote in $REMOTES; do
test_remote $remote
test_remote --subdir $remote
done


@@ -1,15 +0,0 @@
// Log the panic to the log file - for oses which can't do this
//+build !windows,!unix
package main
import (
"log"
"os"
)
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
log.Printf("Can't redirect stderr to file")
}


@@ -1,19 +0,0 @@
// Log the panic under unix to the log file
//+build unix
package main
import (
"log"
"os"
"syscall"
)
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
err := syscall.Dup2(int(f.Fd()), int(os.Stderr.Fd()))
if err != nil {
log.Fatalf("Failed to redirect stderr to file: %v", err)
}
}


@@ -1,39 +0,0 @@
// Log the panic under windows to the log file
//
// Code from minix, via
//
// http://play.golang.org/p/kLtct7lSUg
//+build windows
package main
import (
"log"
"os"
"syscall"
)
var (
kernel32 = syscall.MustLoadDLL("kernel32.dll")
procSetStdHandle = kernel32.MustFindProc("SetStdHandle")
)
func setStdHandle(stdhandle int32, handle syscall.Handle) error {
r0, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
if r0 == 0 {
if e1 != 0 {
return error(e1)
}
return syscall.EINVAL
}
return nil
}
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
err := setStdHandle(syscall.STD_ERROR_HANDLE, syscall.Handle(f.Fd()))
if err != nil {
log.Fatalf("Failed to redirect stderr to file: %v", err)
}
}

524
s3/s3.go

@@ -1,93 +1,72 @@
// Package s3 provides an interface to Amazon S3 object storage
// S3 interface
package s3
// FIXME need to prevent anything but ListDir working for s3://
/*
Progress of port to aws-sdk
* Don't really need o.meta at all?
What happens if you CTRL-C a multipart upload
* get an incomplete upload
* disappears when you delete the bucket
*/
import (
"errors"
"fmt"
"io"
"mime"
"net/http"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/ncw/goamz/aws"
"github.com/ncw/goamz/s3"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
fs.Register(&fs.FsInfo{
Name: "s3",
NewFs: NewFs,
// AWS endpoints: http://docs.amazonwebservices.com/general/latest/gr/rande.html#s3_region
Options: []fs.Option{{
Name: "access_key_id",
Help: "AWS Access Key ID - leave blank for anonymous access.",
Help: "AWS Access Key ID.",
}, {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password) - leave blank for anonymous access.",
}, {
Name: "region",
Help: "Region to connect to.",
Examples: []fs.OptionExample{{
Value: "us-east-1",
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "eu-west-1",
Help: "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
}, {
Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
}, {
Value: "ap-southeast-1",
Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
}, {
Value: "ap-southeast-2",
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
}, {
Value: "ap-northeast-1",
Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}, {
Value: "other-v2-signature",
Help: "If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.",
}, {
Value: "other-v4-signature",
Help: "If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.",
}},
Help: "AWS Secret Access Key (password). ",
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.\nSpecify if using an S3 clone such as Ceph.",
Help: "Endpoint for S3 API.",
Examples: []fs.OptionExample{{
Value: "https://s3.amazonaws.com/",
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
}, {
Value: "https://s3-external-1.amazonaws.com",
Help: "US Region, Northern Virginia only.\nLeave location constraint empty.",
}, {
Value: "https://s3-us-west-2.amazonaws.com",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "https://s3-us-west-1.amazonaws.com",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "https://s3-eu-west-1.amazonaws.com",
Help: "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
}, {
Value: "https://s3-ap-southeast-1.amazonaws.com",
Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
}, {
Value: "https://s3-ap-southeast-2.amazonaws.com",
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
}, {
Value: "https://s3-ap-northeast-1.amazonaws.com",
Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
}, {
Value: "https://s3-sa-east-1.amazonaws.com",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region. Used when creating buckets only.",
Help: "Location constraint - must be set to match the Endpoint.",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for US Region, Northern Virginia or Pacific Northwest.",
@@ -122,19 +101,16 @@ func init() {
// Constants
const (
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
listChunkSize = 1024 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
metaMtime = "X-Amz-Meta-Mtime" // the meta key to store mtime in
)
// FsS3 represents a remote s3 server
type FsS3 struct {
name string // the name of the remote
c *s3.S3 // the connection to the s3 server
bucket string // the bucket we are working on
perm string // permissions for new buckets / objects
root string // root of the bucket - ignore all objects above this
locationConstraint string // location constraint of new buckets
c *s3.S3 // the connection to the s3 server
b *s3.Bucket // the connection to the bucket
bucket string // the bucket we are working on
perm s3.ACL // permissions for new buckets / objects
root string // root of the bucket - ignore all objects above this
}
// FsObjectS3 describes a s3 object
@@ -143,29 +119,16 @@ type FsObjectS3 struct {
//
// List will read everything but meta - to fill that in need to call
// readMetaData
s3 *FsS3 // what this object is part of
remote string // The remote path
etag string // md5sum of the object
bytes int64 // size of the object
lastModified time.Time // Last modified
meta map[string]*string // The object metadata if known - may be nil
s3 *FsS3 // what this object is part of
remote string // The remote path
etag string // md5sum of the object
bytes int64 // size of the object
lastModified time.Time // Last modified
meta s3.Headers // The object metadata if known - may be nil
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *FsS3) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *FsS3) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this FsS3 to a string
func (f *FsS3) String() string {
if f.root == "" {
@@ -192,59 +155,39 @@ func s3ParsePath(path string) (bucket, directory string, err error) {
// s3Connection makes a connection to s3
func s3Connection(name string) (*s3.S3, error) {
// Make the auth
accessKeyID := fs.ConfigFile.MustValue(name, "access_key_id")
secretAccessKey := fs.ConfigFile.MustValue(name, "secret_access_key")
var auth *credentials.Credentials
switch {
case accessKeyID == "" && secretAccessKey == "":
fs.Debug(name, "Using anonymous access for S3")
auth = credentials.AnonymousCredentials
case accessKeyID == "":
accessKeyId := fs.ConfigFile.MustValue(name, "access_key_id")
if accessKeyId == "" {
return nil, errors.New("access_key_id not found")
case secretAccessKey == "":
}
secretAccessKey := fs.ConfigFile.MustValue(name, "secret_access_key")
if secretAccessKey == "" {
return nil, errors.New("secret_access_key not found")
default:
auth = credentials.NewStaticCredentials(accessKeyID, secretAccessKey, "")
}
auth := aws.Auth{AccessKey: accessKeyId, SecretKey: secretAccessKey}
// FIXME look through all the regions by name and use one of them if found
// Synthesize the region
s3Endpoint := fs.ConfigFile.MustValue(name, "endpoint")
if s3Endpoint == "" {
s3Endpoint = "https://s3.amazonaws.com/"
}
region := aws.Region{
Name: "s3",
S3Endpoint: s3Endpoint,
S3LocationConstraint: false,
}
s3LocationConstraint := fs.ConfigFile.MustValue(name, "location_constraint")
if s3LocationConstraint != "" {
region.Name = s3LocationConstraint
region.S3LocationConstraint = true
}
endpoint := fs.ConfigFile.MustValue(name, "endpoint")
region := fs.ConfigFile.MustValue(name, "region")
if region == "" && endpoint == "" {
endpoint = "https://s3.amazonaws.com/"
}
if region == "" {
region = "us-east-1"
}
awsConfig := aws.NewConfig().
WithRegion(region).
WithMaxRetries(maxRetries).
WithCredentials(auth).
WithEndpoint(endpoint).
WithHTTPClient(fs.Config.Client()).
WithS3ForcePathStyle(true)
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
c := s3.New(awsConfig)
if region == "other-v2-signature" {
fs.Debug(name, "Using v2 auth")
signer := func(req *request.Request) {
// Ignore AnonymousCredentials object
if req.Service.Config.Credentials == credentials.AnonymousCredentials {
return
}
sign(accessKeyID, secretAccessKey, req.HTTPRequest)
}
c.Handlers.Sign.Clear()
c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
c.Handlers.Sign.PushBack(signer)
}
// Add user agent
c.Handlers.Build.PushBack(func(r *request.Request) {
r.HTTPRequest.Header.Set("User-Agent", fs.UserAgent)
})
c := s3.New(auth, region)
return c, nil
}
// NewFs constructs an FsS3 from the path, bucket:path
// NewFsS3 constructs an FsS3 from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
bucket, directory, err := s3ParsePath(root)
if err != nil {
@@ -255,21 +198,16 @@ func NewFs(name, root string) (fs.Fs, error) {
return nil, err
}
f := &FsS3{
name: name,
c: c,
bucket: bucket,
// FIXME perm: s3.Private, // FIXME need user to specify
root: directory,
locationConstraint: fs.ConfigFile.MustValue(name, "location_constraint"),
b: c.Bucket(bucket),
perm: s3.Private, // FIXME need user to specify
root: directory,
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
req := s3.HeadObjectInput{
Bucket: &f.bucket,
Key: &directory,
}
_, err = f.c.HeadObject(&req)
_, err = f.b.Head(directory, nil)
if err == nil {
remote := path.Base(directory)
f.root = path.Dir(directory)
@@ -283,28 +221,27 @@ func NewFs(name, root string) (fs.Fs, error) {
return fs.NewLimited(f, obj), nil
}
}
// f.listMultipartUploads()
return f, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
func (f *FsS3) NewFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
o := &FsObjectS3{
s3: f,
remote: remote,
}
if info != nil {
// Set info but not meta
if info.LastModified == nil {
fs.Log(o, "Failed to read last modified")
var err error
o.lastModified, err = time.Parse(time.RFC3339, info.LastModified)
if err != nil {
fs.Log(o, "Failed to read last modified: %s", err)
o.lastModified = time.Now()
} else {
o.lastModified = *info.LastModified
}
o.etag = aws.StringValue(info.ETag)
o.bytes = aws.Int64Value(info.Size)
o.etag = info.ETag
o.bytes = info.Size
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
@@ -315,93 +252,64 @@ func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
return o
}
// NewFsObject returns an FsObject from a path
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
return f.NewFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsS3) list(directories bool, fn func(string, *s3.Object)) {
maxKeys := int64(listChunkSize)
func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
delimiter := ""
if directories {
delimiter = "/"
}
var marker *string
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
Bucket: &f.bucket,
Delimiter: &delimiter,
Prefix: &f.root,
MaxKeys: &maxKeys,
Marker: marker,
}
resp, err := f.c.ListObjects(&req)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
break
// FIXME need to implement ALL loop
objects, err := f.b.List(f.root, delimiter, "", 10000)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
} else {
rootLength := len(f.root)
if directories {
for _, remote := range objects.CommonPrefixes {
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote := remote[rootLength:]
fn(remote, &s3.Key{Key: remote})
}
} else {
rootLength := len(f.root)
if directories {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix.Prefix == nil {
fs.Log(f, "Nil common prefix received")
continue
}
remote := *commonPrefix.Prefix
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote = remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
fn(remote, &s3.Object{Key: &remote})
for i := range objects.Contents {
object := &objects.Contents[i]
if !strings.HasPrefix(object.Key, f.root) {
fs.Log(f, "Odd name received %q", object.Key)
continue
}
} else {
for _, object := range resp.Contents {
key := aws.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Log(f, "Odd name received %q", key)
continue
}
remote := key[rootLength:]
fn(remote, object)
}
}
if !aws.BoolValue(resp.IsTruncated) {
break
}
// Use NextMarker if set, otherwise use last Key
if resp.NextMarker == nil || *resp.NextMarker == "" {
marker = resp.Contents[len(resp.Contents)-1].Key
} else {
marker = resp.NextMarker
remote := object.Key[rootLength:]
fn(remote, object)
}
}
}
}
// List walks the path returning a channel of FsObjects
// Walk the path returning a channel of FsObjects
func (f *FsS3) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
if f.bucket == "" {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
} else {
go func() {
defer close(out)
f.list(false, func(remote string, object *s3.Object) {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
f.list(false, func(remote string, object *s3.Key) {
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
@@ -410,23 +318,22 @@ func (f *FsS3) List() fs.ObjectsChan {
return out
}
// ListDir lists the buckets
// Lists the buckets
func (f *FsS3) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
if f.bucket == "" {
// List the buckets
go func() {
defer close(out)
req := s3.ListBucketsInput{}
resp, err := f.c.ListBuckets(&req)
buckets, err := f.c.ListBuckets()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list buckets: %s", err)
fs.Log(f, "Couldn't list buckets: %s", err)
} else {
for _, bucket := range resp.Buckets {
for _, bucket := range buckets {
out <- &fs.Dir{
Name: aws.StringValue(bucket.Name),
When: aws.TimeValue(bucket.CreationDate),
Name: bucket.Name,
When: bucket.CreationDate,
Bytes: -1,
Count: -1,
}
@@ -437,14 +344,10 @@ func (f *FsS3) ListDir() fs.DirChan {
// List the directories in the path in the bucket
go func() {
defer close(out)
f.list(true, func(remote string, object *s3.Object) {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
f.list(true, func(remote string, object *s3.Key) {
out <- &fs.Dir{
Name: remote,
Bytes: size,
Bytes: object.Size,
Count: 0,
}
})
@@ -462,18 +365,9 @@ func (f *FsS3) Put(in io.Reader, remote string, modTime time.Time, size int64) (
// Mkdir creates the bucket if it doesn't exist
func (f *FsS3) Mkdir() error {
req := s3.CreateBucketInput{
Bucket: &f.bucket,
ACL: &f.perm,
}
if f.locationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
LocationConstraint: &f.locationConstraint,
}
}
_, err := f.c.CreateBucket(&req)
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
err := f.b.PutBucket(f.perm)
if err, ok := err.(*s3.Error); ok {
if err.Code == "BucketAlreadyOwnedByYou" {
return nil
}
}
@@ -484,52 +378,17 @@ func (f *FsS3) Mkdir() error {
//
// Returns an error if it isn't empty
func (f *FsS3) Rmdir() error {
req := s3.DeleteBucketInput{
Bucket: &f.bucket,
}
_, err := f.c.DeleteBucket(&req)
return err
return f.b.DelBucket()
}
// Precision of the remote
// Return the precision
func (f *FsS3) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsS3) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*FsObjectS3)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.s3
key := f.root + remote
source := srcFs.bucket + "/" + srcFs.root + srcObj.remote
req := s3.CopyObjectInput{
Bucket: &f.bucket,
Key: &key,
CopySource: &source,
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
}
_, err := f.c.CopyObject(&req)
if err != nil {
return nil, err
}
return f.NewFsObject(remote), err
}
// ------------------------------------------------------------
// Fs returns the parent Fs
// Return the parent Fs
func (o *FsObjectS3) Fs() fs.Fs {
return o.s3
}
@@ -542,22 +401,14 @@ func (o *FsObjectS3) String() string {
return o.remote
}
// Remote returns the remote path
// Return the remote path
func (o *FsObjectS3) Remote() string {
return o.remote
}
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectS3) Md5sum() (string, error) {
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
// fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
return "", nil
}
return etag, nil
return strings.Trim(strings.ToLower(o.etag), `"`), nil
}
// Size returns the size of an object in bytes
@@ -572,12 +423,8 @@ func (o *FsObjectS3) readMetaData() (err error) {
if o.meta != nil {
return nil
}
key := o.s3.root + o.remote
req := s3.HeadObjectInput{
Bucket: &o.s3.bucket,
Key: &key,
}
resp, err := o.s3.c.HeadObject(&req)
headers, err := o.s3.b.Head(o.s3.root+o.remote, nil)
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
@@ -585,17 +432,19 @@ func (o *FsObjectS3) readMetaData() (err error) {
var size int64
// Ignore missing Content-Length assuming it is 0
// Some versions of ceph do this due their apache proxies
if resp.ContentLength != nil {
size = *resp.ContentLength
if contentLength, ok := headers["Content-Length"]; ok {
size, err = strconv.ParseInt(contentLength, 10, 64)
if err != nil {
fs.Debug(o, "Failed to read size from: %q", headers)
return err
}
}
o.etag = aws.StringValue(resp.ETag)
o.etag = headers["Etag"]
o.bytes = size
o.meta = resp.Metadata
if resp.LastModified == nil {
o.meta = headers
if o.lastModified, err = time.Parse(http.TimeFormat, headers["Last-Modified"]); err != nil {
fs.Log(o, "Failed to read last modified from HEAD: %s", err)
o.lastModified = time.Now()
} else {
o.lastModified = *resp.LastModified
}
return nil
}
@@ -612,11 +461,11 @@ func (o *FsObjectS3) ModTime() time.Time {
}
// read mtime out of metadata if available
d, ok := o.meta[metaMtime]
if !ok || d == nil {
if !ok {
// fs.Debug(o, "No metadata")
return o.lastModified
}
modTime, err := swift.FloatStringToTime(*d)
modTime, err := swift.FloatStringToTime(d)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %s", err)
return o.lastModified
@@ -624,105 +473,60 @@ func (o *FsObjectS3) ModTime() time.Time {
return modTime
}
// SetModTime sets the modification time of the local fs object
// Sets the modification time of the local fs object
func (o *FsObjectS3) SetModTime(modTime time.Time) {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to read metadata: %s", err)
fs.Log(o, "Failed to read metadata: %s", err)
return
}
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
// Copy the object to itself to update the metadata
key := o.s3.root + o.remote
sourceKey := o.s3.bucket + "/" + key
directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
req := s3.CopyObjectInput{
Bucket: &o.s3.bucket,
ACL: &o.s3.perm,
Key: &key,
CopySource: &sourceKey,
Metadata: o.meta,
MetadataDirective: &directive,
}
_, err = o.s3.c.CopyObject(&req)
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
_, err = o.s3.b.Update(o.s3.root+o.remote, o.s3.perm, o.meta)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
fs.Log(o, "Failed to update remote mtime: %s", err)
}
}
// Storable returns a boolean indicating if this object is storable
// Is this object storable
func (o *FsObjectS3) Storable() bool {
return true
}
// Open an object for read
func (o *FsObjectS3) Open() (in io.ReadCloser, err error) {
key := o.s3.root + o.remote
req := s3.GetObjectInput{
Bucket: &o.s3.bucket,
Key: &key,
}
resp, err := o.s3.c.GetObject(&req)
if err != nil {
return nil, err
}
return resp.Body, nil
in, err = o.s3.b.GetReader(o.s3.root + o.remote)
return
}
// Update the Object from in with modTime and size
func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
opts := s3manager.UploadOptions{
// PartSize: 64 * 1024 * 1024, use default
Concurrency: 2, // limit concurrency
LeavePartsOnError: false,
S3: o.s3.c,
}
uploader := s3manager.NewUploader(&opts)
// Set the mtime in the meta data
metadata := map[string]*string{
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
// Set the mtime in the headers
headers := s3.Headers{
metaMtime: swift.TimeToFloatString(modTime),
}
// Guess the content type
contentType := fs.MimeType(o)
key := o.s3.root + o.remote
req := s3manager.UploadInput{
Bucket: &o.s3.bucket,
ACL: &o.s3.perm,
Key: &key,
Body: in,
ContentType: &contentType,
Metadata: metadata,
//ContentLength: &size,
contentType := mime.TypeByExtension(path.Ext(o.remote))
if contentType == "" {
contentType = "application/octet-stream"
}
_, err := uploader.Upload(&req)
_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, contentType, o.s3.perm, headers)
if err != nil {
return err
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData()
return err
}
// Remove an object
func (o *FsObjectS3) Remove() error {
key := o.s3.root + o.remote
req := s3.DeleteObjectInput{
Bucket: &o.s3.bucket,
Key: &key,
}
_, err := o.s3.c.DeleteObject(&req)
return err
return o.s3.b.Del(o.s3.root + o.remote)
}
// Check the interfaces are satisfied
var _ fs.Fs = &FsS3{}
var _ fs.Copier = &FsS3{}
var _ fs.Object = &FsObjectS3{}
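
Both sides of this diff persist the modification time identically: as a floating-point epoch string stored under an Mtime metadata key, encoded with swift.TimeToFloatString and decoded with swift.FloatStringToTime. A round-trip sketch using those helpers from github.com/ncw/swift:

package main

import (
	"fmt"
	"time"

	"github.com/ncw/swift"
)

func main() {
	// Encode a modification time the way the S3 backend stores it in
	// object metadata, then parse it back.
	encoded := swift.TimeToFloatString(time.Now())
	decoded, err := swift.FloatStringToTime(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(encoded, decoded)
}
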


@@ -1,56 +0,0 @@
// Test S3 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package s3_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/s3"
)
func init() {
fstests.NilObject = fs.Object((*s3.FsObjectS3)(nil))
fstests.RemoteName = "TestS3:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }


@@ -1,115 +0,0 @@
// v2 signing
package s3
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"net/http"
"sort"
"strings"
"time"
)
// URL parameters that need to be added to the signature
var s3ParamsToSign = map[string]struct{}{
"acl": struct{}{},
"location": struct{}{},
"logging": struct{}{},
"notification": struct{}{},
"partNumber": struct{}{},
"policy": struct{}{},
"requestPayment": struct{}{},
"torrent": struct{}{},
"uploadId": struct{}{},
"uploads": struct{}{},
"versionId": struct{}{},
"versioning": struct{}{},
"versions": struct{}{},
"response-content-type": struct{}{},
"response-content-language": struct{}{},
"response-expires": struct{}{},
"response-cache-control": struct{}{},
"response-content-disposition": struct{}{},
"response-content-encoding": struct{}{},
}
// sign signs requests using v2 auth
//
// Cobbled together from goamz and aws-sdk-go
func sign(AccessKey, SecretKey string, req *http.Request) {
// Set date
date := time.Now().UTC().Format(time.RFC1123)
req.Header.Set("Date", date)
// Sort out URI
uri := req.URL.Opaque
if uri != "" {
if strings.HasPrefix(uri, "//") {
// Strip off //host/uri
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
}
} else {
uri = req.URL.Path
}
if uri == "" {
uri = "/"
}
// Look through headers of interest
var md5 string
var contentType string
var headersToSign []string
for k, v := range req.Header {
k = strings.ToLower(k)
switch k {
case "content-md5":
md5 = v[0]
case "content-type":
contentType = v[0]
default:
if strings.HasPrefix(k, "x-amz-") {
vall := strings.Join(v, ",")
headersToSign = append(headersToSign, k+":"+vall)
}
}
}
// Make headers of interest into canonical string
var joinedHeadersToSign string
if len(headersToSign) > 0 {
sort.StringSlice(headersToSign).Sort()
joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
}
// Look for query parameters which need to be added to the signature
params := req.URL.Query()
var queriesToSign []string
for k, vs := range params {
if _, ok := s3ParamsToSign[k]; ok {
for _, v := range vs {
if v == "" {
queriesToSign = append(queriesToSign, k)
} else {
queriesToSign = append(queriesToSign, k+"="+v)
}
}
}
}
// Add query parameters to URI
if len(queriesToSign) > 0 {
sort.StringSlice(queriesToSign).Sort()
uri += "?" + strings.Join(queriesToSign, "&")
}
// Make signature
payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
hash := hmac.New(sha1.New, []byte(SecretKey))
_, _ = hash.Write([]byte(payload))
signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
base64.StdEncoding.Encode(signature, hash.Sum(nil))
// Set signature in request
req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
}
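
For reference, calling the deleted signer directly would look like the sketch below, placed alongside sign in the same package so the unexported function is in scope; the credentials and URL are placeholders:

package s3

import "net/http"

// exampleSignedRequest builds a request and lets sign (above) add the
// Date header and the "AWS access:signature" Authorization header.
func exampleSignedRequest() (*http.Request, error) {
	req, err := http.NewRequest("GET", "https://s3.amazonaws.com/bucket/key", nil)
	if err != nil {
		return nil, err
	}
	sign("AKEXAMPLE", "SECRETEXAMPLE", req)
	return req, nil
}
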


@@ -1,30 +1,22 @@
// Package swift provides an interface to the Swift object storage system
// Swift interface
package swift
import (
"bytes"
"errors"
"fmt"
"io"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
"github.com/spf13/pflag"
)
// Globals
var (
chunkSize = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
fs.Register(&fs.FsInfo{
Name: "swift",
NewFs: NewFs,
Options: []fs.Option{{
@@ -52,53 +44,31 @@ func init() {
Help: "Memset Memstore UK v2",
Value: "https://auth.storage.memset.com/v2.0",
}},
}, {
Name: "tenant",
Help: "Tenant name - optional",
}, {
Name: "region",
Help: "Region name - optional",
},
// snet = flag.Bool("swift-snet", false, "Use internal service network") // FIXME not implemented
},
})
// snet = flag.Bool("swift-snet", false, "Use internal service network") // FIXME not implemented
pflag.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
}
// FsSwift represents a remote swift server
type FsSwift struct {
name string // name of this remote
c swift.Connection // the connection to the swift server
container string // the container we are working on
segmentsContainer string // container to store the segments (if any) in
root string // the path we are working on if any
c swift.Connection // the connection to the swift server
container string // the container we are working on
root string // the path we are working on if any
}
// FsObjectSwift describes a swift object
//
// Will definitely have info but maybe not meta
type FsObjectSwift struct {
swift *FsSwift // what this object is part of
remote string // The remote path
info swift.Object // Info from the swift object if known
headers *swift.Headers // The object headers if known
swift *FsSwift // what this object is part of
remote string // The remote path
info swift.Object // Info from the swift object if known
meta *swift.Metadata // The object metadata if known
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *FsSwift) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *FsSwift) Root() string {
if f.root == "" {
return f.container
}
return f.container + "/" + f.root
}
// String converts this FsSwift to a string
func (f *FsSwift) String() string {
if f.root == "" {
@@ -132,20 +102,15 @@ func swiftConnection(name string) (*swift.Connection, error) {
if apiKey == "" {
return nil, errors.New("key not found")
}
authURL := fs.ConfigFile.MustValue(name, "auth")
if authURL == "" {
authUrl := fs.ConfigFile.MustValue(name, "auth")
if authUrl == "" {
return nil, errors.New("auth not found")
}
c := &swift.Connection{
UserName: userName,
ApiKey: apiKey,
AuthUrl: authURL,
UserAgent: fs.UserAgent,
Tenant: fs.ConfigFile.MustValue(name, "tenant"),
Region: fs.ConfigFile.MustValue(name, "region"),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fs.Config.Transport(),
UserName: userName,
ApiKey: apiKey,
AuthUrl: authUrl,
UserAgent: fs.UserAgent,
}
err := c.Authenticate()
if err != nil {
@@ -165,11 +130,9 @@ func NewFs(name, root string) (fs.Fs, error) {
return nil, err
}
f := &FsSwift{
name: name,
c: *c,
container: container,
segmentsContainer: container + "_segments",
root: directory,
c: *c,
container: container,
root: directory,
}
if f.root != "" {
f.root += "/"
@@ -194,117 +157,92 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsSwift) newFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
o := &FsObjectSwift{
func (f *FsSwift) NewFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
fs := &FsObjectSwift{
swift: f,
remote: remote,
}
if info != nil {
// Set info but not headers
o.info = *info
// Set info but not meta
fs.info = *info
} else {
err := o.readMetaData() // reads info and headers, returning an error
err := fs.readMetaData() // reads info and meta, returning an error
if err != nil {
fs.Debug(o, "Failed to read metadata: %s", err)
// logged already FsDebug("Failed to read info: %s", err)
return nil
}
}
return o
return fs
}
// NewFsObject returns an FsObject from a path
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsSwift) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}
// listFn is called from list and listContainerRoot to handle an object
type listFn func(string, *swift.Object) error
// listContainerRoot lists the objects into the function supplied from
// the container and root supplied
//
// If directories is set it only sends directories
func (f *FsSwift) listContainerRoot(container, root string, directories bool, fn listFn) error {
// Options for ObjectsWalk
opts := swift.ObjectsOpts{
Prefix: root,
Limit: 256,
}
if directories {
opts.Delimiter = '/'
}
rootLength := len(root)
return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
objects, err := f.c.Objects(container, opts)
if err == nil {
for i := range objects {
object := &objects[i]
// FIXME if there are no directories, swift gives back the files for some reason!
if directories {
if !strings.HasSuffix(object.Name, "/") {
continue
}
object.Name = object.Name[:len(object.Name)-1]
}
if !strings.HasPrefix(object.Name, root) {
fs.Log(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
err = fn(remote, object)
if err != nil {
break
}
}
}
return objects, err
})
return f.NewFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsSwift) list(directories bool, fn listFn) {
err := f.listContainerRoot(f.container, f.root, directories, fn)
func (f *FsSwift) list(directories bool, fn func(string, *swift.Object)) {
// Options for ObjectsWalk
opts := swift.ObjectsOpts{
Prefix: f.root,
Limit: 256,
}
if directories {
opts.Delimiter = '/'
}
rootLength := len(f.root)
err := f.c.ObjectsWalk(f.container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
objects, err := f.c.Objects(f.container, opts)
if err == nil {
for i := range objects {
object := &objects[i]
// FIXME if there are no directories, swift gives back the files for some reason!
if directories && !strings.HasSuffix(object.Name, "/") {
continue
}
if !strings.HasPrefix(object.Name, f.root) {
fs.Log(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
fn(remote, object)
}
}
return objects, err
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't read container %q: %s", f.container, err)
fs.Log(f, "Couldn't read container %q: %s", f.container, err)
}
}
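
Both versions of the listing code drive the same pagination primitive: ObjectsWalk repeatedly calls the supplied function with a Limit-sized page of results until the listing is exhausted, and setting Delimiter to '/' makes Swift roll objects up into pseudo-directories. A minimal sketch of that pattern against github.com/ncw/swift directly (credentials and the container name are hypothetical placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/ncw/swift"
)

func main() {
	c := swift.Connection{
		UserName: "user",                          // hypothetical
		ApiKey:   "key",                           // hypothetical
		AuthUrl:  "https://auth.example.com/v2.0", // hypothetical
	}
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}
	opts := swift.ObjectsOpts{
		Prefix:    "photos/", // only objects under this prefix
		Delimiter: '/',       // roll up pseudo-directories, as ListDir does
		Limit:     256,       // page size; ObjectsWalk loops until done
	}
	err := c.ObjectsWalk("mycontainer", &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
		objects, err := c.Objects("mycontainer", opts)
		for i := range objects {
			fmt.Println(objects[i].Name, objects[i].Bytes)
		}
		return objects, err
	})
	if err != nil {
		log.Fatal(err)
	}
}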
// List walks the path returning a channel of FsObjects
// Walk the path returning a channel of FsObjects
func (f *FsSwift) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
if f.container == "" {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.ErrorLog(f, "Can't list objects at root - choose a container using lsd")
fs.Log(f, "Can't list objects at root - choose a container using lsd")
} else {
// List the objects
go func() {
defer close(out)
f.list(false, func(remote string, object *swift.Object) error {
if o := f.newFsObjectWithInfo(remote, object); o != nil {
// Do full metadata read on 0 size objects which might be manifest files
if o.Size() == 0 {
err := o.(*FsObjectSwift).readMetaData()
if err != nil {
fs.Debug(o, "Failed to read metadata: %v", err)
}
}
out <- o
f.list(false, func(remote string, object *swift.Object) {
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
return nil
})
}()
}
return out
}
// ListDir lists the containers
// Lists the containers
func (f *FsSwift) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
if f.container == "" {
@@ -314,7 +252,7 @@ func (f *FsSwift) ListDir() fs.DirChan {
containers, err := f.c.ContainersAll(nil)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list containers: %v", err)
fs.Log(f, "Couldn't list containers: %v", err)
} else {
for _, container := range containers {
out <- &fs.Dir{
@@ -329,13 +267,12 @@ func (f *FsSwift) ListDir() fs.DirChan {
// List the directories in the path in the container
go func() {
defer close(out)
f.list(true, func(remote string, object *swift.Object) error {
f.list(true, func(remote string, object *swift.Object) {
out <- &fs.Dir{
Name: remote,
Bytes: object.Bytes,
Count: 0,
}
return nil
})
}()
}
@@ -365,37 +302,14 @@ func (f *FsSwift) Rmdir() error {
return f.c.ContainerDelete(f.container)
}
// Precision of the remote
func (f *FsSwift) Precision() time.Duration {
// Return the precision
func (fs *FsSwift) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsSwift) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*FsObjectSwift)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.swift
_, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
if err != nil {
return nil, err
}
return f.NewFsObject(remote), nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
// Return the parent Fs
func (o *FsObjectSwift) Fs() fs.Fs {
return o.swift
}
@@ -408,37 +322,16 @@ func (o *FsObjectSwift) String() string {
return o.remote
}
// Remote returns the remote path
// Return the remote path
func (o *FsObjectSwift) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectSwift) Md5sum() (string, error) {
isManifest, err := o.isManifestFile()
if err != nil {
return "", err
}
if isManifest {
fs.Debug(o, "Returning empty Md5sum for swift manifest file")
return "", nil
}
return strings.ToLower(o.info.Hash), nil
}
// isManifestFile checks for manifest header
func (o *FsObjectSwift) isManifestFile() (bool, error) {
err := o.readMetaData()
if err != nil {
if err == swift.ObjectNotFound {
return false, nil
}
return false, err
}
_, isManifestFile := (*o.headers)["X-Object-Manifest"]
return isManifestFile, nil
}
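
The Md5sum special case exists because a Dynamic Large Object manifest is a zero-byte object whose X-Object-Manifest header names the container and prefix holding its segments, so its stored hash does not describe the concatenated content. A sketch of the header check on its own (names are hypothetical; this is not rclone's exact code):

package swiftutil // hypothetical package, sketch only

import "github.com/ncw/swift"

// isDLOManifest reports whether container/object is a Dynamic Large Object
// manifest, i.e. carries an X-Object-Manifest header such as
// "mycontainer_segments/path/to/object".
func isDLOManifest(c *swift.Connection, container, object string) (bool, error) {
	_, headers, err := c.Object(container, object)
	if err == swift.ObjectNotFound {
		return false, nil // no object at all, so no manifest
	}
	if err != nil {
		return false, err
	}
	_, ok := headers["X-Object-Manifest"]
	return ok, nil
}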
// Size returns the size of an object in bytes
func (o *FsObjectSwift) Size() int64 {
return o.info.Bytes
@@ -448,15 +341,17 @@ func (o *FsObjectSwift) Size() int64 {
//
// it also sets the info
func (o *FsObjectSwift) readMetaData() (err error) {
if o.headers != nil {
if o.meta != nil {
return nil
}
info, h, err := o.swift.c.Object(o.swift.container, o.swift.root+o.remote)
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
}
meta := h.ObjectMetadata()
o.info = info
o.headers = &h
o.meta = &meta
return nil
}
@@ -468,10 +363,10 @@ func (o *FsObjectSwift) readMetaData() (err error) {
func (o *FsObjectSwift) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Debug(o, "Failed to read metadata: %s", err)
// fs.Log(o, "Failed to read metadata: %s", err)
return o.info.LastModified
}
modTime, err := o.headers.ObjectMetadata().GetModTime()
modTime, err := o.meta.GetModTime()
if err != nil {
// fs.Log(o, "Failed to read mtime from object: %s", err)
return o.info.LastModified
@@ -479,28 +374,23 @@ func (o *FsObjectSwift) ModTime() time.Time {
return modTime
}
// SetModTime sets the modification time of the local fs object
// Sets the modification time of the local fs object
func (o *FsObjectSwift) SetModTime(modTime time.Time) {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to read metadata: %s", err)
fs.Log(o, "Failed to read metadata: %s", err)
return
}
meta := o.headers.ObjectMetadata()
meta.SetModTime(modTime)
newHeaders := meta.ObjectHeaders()
for k, v := range newHeaders {
(*o.headers)[k] = v
}
err = o.swift.c.ObjectUpdate(o.swift.container, o.swift.root+o.remote, newHeaders)
o.meta.SetModTime(modTime)
err = o.swift.c.ObjectUpdate(o.swift.container, o.swift.root+o.remote, o.meta.ObjectHeaders())
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
fs.Log(o, "Failed to update remote mtime: %s", err)
}
}
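
SetModTime works because rclone keeps the modification time in object metadata (an X-Object-Meta-Mtime header), which Swift lets you rewrite with a metadata POST rather than re-uploading the body. A sketch of that pattern, assuming an authenticated connection (container and object names are hypothetical):

package swiftutil // hypothetical package, sketch only

import (
	"time"

	"github.com/ncw/swift"
)

// touch rewrites the stored modification time of container/object without
// transferring any object data.
func touch(c *swift.Connection, container, object string, modTime time.Time) error {
	m := swift.Metadata{}
	m.SetModTime(modTime) // sets the mtime metadata key
	return c.ObjectUpdate(container, object, m.ObjectHeaders())
}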
// Storable returns if this object is storable
// Is this object storable
func (o *FsObjectSwift) Storable() bool {
return true
}
@@ -511,137 +401,27 @@ func (o *FsObjectSwift) Open() (in io.ReadCloser, err error) {
return
}
// min returns the smallest of x, y
func min(x, y int64) int64 {
if x < y {
return x
}
return y
}
// removeSegments removes any old segments from o
//
// if except is passed in then segments with that prefix won't be deleted
func (o *FsObjectSwift) removeSegments(except string) error {
segmentsRoot := o.swift.root + o.remote + "/"
err := o.swift.listContainerRoot(o.swift.segmentsContainer, segmentsRoot, false, func(remote string, object *swift.Object) error {
if except != "" && strings.HasPrefix(remote, except) {
// fs.Debug(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.swift.segmentsContainer)
return nil
}
segmentPath := segmentsRoot + remote
fs.Debug(o, "Removing segment file %q in container %q", segmentPath, o.swift.segmentsContainer)
return o.swift.c.ObjectDelete(o.swift.segmentsContainer, segmentPath)
})
if err != nil {
return err
}
// remove the segments container if empty, ignore errors
err = o.swift.c.ContainerDelete(o.swift.segmentsContainer)
if err == nil {
fs.Debug(o, "Removed empty container %q", o.swift.segmentsContainer)
}
return nil
}
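
removeSegments walks the segments container under the object's own prefix, deletes everything that does not belong to the upload in progress, then opportunistically deletes the segments container in case it is now empty. A compact sketch of the same cleanup, assuming ObjectNamesAll from the same library (container and prefix names are hypothetical, caller-supplied values):

package swiftutil // hypothetical package, sketch only

import "github.com/ncw/swift"

// deleteSegments removes every object under prefix in segmentsContainer.
// ObjectNamesAll pages through the complete listing for us.
func deleteSegments(c *swift.Connection, segmentsContainer, prefix string) error {
	names, err := c.ObjectNamesAll(segmentsContainer, &swift.ObjectsOpts{Prefix: prefix})
	if err != nil {
		return err
	}
	for _, name := range names {
		if err := c.ObjectDelete(segmentsContainer, name); err != nil {
			return err
		}
	}
	return nil
}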
// updateChunks updates the existing object using chunks to a separate
// container. It returns a string which prefixes current segments.
func (o *FsObjectSwift) updateChunks(in io.Reader, headers swift.Headers, size int64) (string, error) {
// Create the segmentsContainer if it doesn't exist
err := o.swift.c.ContainerCreate(o.swift.segmentsContainer, nil)
if err != nil {
return "", err
}
// Upload the chunks
left := size
i := 0
uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
segmentsPath := fmt.Sprintf("%s%s/%s", o.swift.root, o.remote, uniquePrefix)
for left > 0 {
n := min(left, int64(chunkSize))
headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
segmentReader := io.LimitReader(in, n)
segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
fs.Debug(o, "Uploading segment file %q into %q", segmentPath, o.swift.segmentsContainer)
_, err := o.swift.c.ObjectPut(o.swift.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
if err != nil {
return "", err
}
left -= n
i++
}
// Upload the manifest
headers["X-Object-Manifest"] = fmt.Sprintf("%s/%s", o.swift.segmentsContainer, segmentsPath)
headers["Content-Length"] = "0" // set Content-Length as we know it
emptyReader := bytes.NewReader(nil)
manifestName := o.swift.root + o.remote
_, err = o.swift.c.ObjectPut(o.swift.container, manifestName, emptyReader, true, "", "", headers)
return uniquePrefix + "/", err
}
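
updateChunks implements Swift's Dynamic Large Object convention: the body is split into numbered segment objects of at most chunkSize bytes in a separate _segments container, and the visible object is a zero-byte manifest whose X-Object-Manifest header points at them; the timestamped uniquePrefix keeps each upload's segments distinct so stale ones can be removed afterwards. A simplified sketch of the convention, without the unique prefix (names are hypothetical):

package swiftutil // hypothetical package, sketch only

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ncw/swift"
)

// putChunked uploads size bytes from in as segments of at most chunkSize
// bytes, then writes the zero-byte manifest that stitches them together.
func putChunked(c *swift.Connection, container, segmentsContainer, object string, in io.Reader, size, chunkSize int64) error {
	if err := c.ContainerCreate(segmentsContainer, nil); err != nil {
		return err
	}
	for i, left := 0, size; left > 0; i++ {
		n := left
		if n > chunkSize {
			n = chunkSize
		}
		segment := fmt.Sprintf("%s/%08d", object, i)
		if _, err := c.ObjectPut(segmentsContainer, segment, io.LimitReader(in, n), true, "", "", nil); err != nil {
			return err
		}
		left -= n
	}
	headers := swift.Headers{"X-Object-Manifest": segmentsContainer + "/" + object}
	_, err := c.ObjectPut(container, object, bytes.NewReader(nil), true, "", "", headers)
	return err
}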
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectSwift) Update(in io.Reader, modTime time.Time, size int64) error {
// Note whether this has a manifest before starting
isManifest, err := o.isManifestFile()
if err != nil {
return err
}
// Set the mtime
m := swift.Metadata{}
m.SetModTime(modTime)
headers := m.ObjectHeaders()
uniquePrefix := ""
if size > int64(chunkSize) {
uniquePrefix, err = o.updateChunks(in, headers, size)
if err != nil {
return err
}
} else {
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
_, err := o.swift.c.ObjectPut(o.swift.container, o.swift.root+o.remote, in, true, "", "", headers)
if err != nil {
return err
}
_, err := o.swift.c.ObjectPut(o.swift.container, o.swift.root+o.remote, in, true, "", "", m.ObjectHeaders())
if err != nil {
return err
}
// If file was a manifest then remove old/all segments
if isManifest {
err = o.removeSegments(uniquePrefix)
if err != nil {
fs.Log(o, "Failed to remove old segments - carrying on with upload: %v", err)
}
}
// Read the metadata from the newly created object
o.headers = nil // wipe old metadata
return o.readMetaData()
err = o.readMetaData()
return err
}
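
Only bodies larger than chunkSize take the segmented path; everything else is a single ObjectPut with an explicit Content-Length. A worked example of that split, assuming the 5 GiB default above: a hypothetical 12 GiB upload becomes three segments of 5 GiB, 5 GiB and 2 GiB.

package main

import "fmt"

func main() {
	const chunkSize = int64(5 << 30) // the 5 GiB default above
	left := int64(12 << 30)          // hypothetical 12 GiB body
	for i := 0; left > 0; i++ {
		n := left
		if n > chunkSize {
			n = chunkSize
		}
		fmt.Printf("segment %08d: %d bytes\n", i, n)
		left -= n
	}
}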
// Remove an object
func (o *FsObjectSwift) Remove() error {
isManifestFile, err := o.isManifestFile()
if err != nil {
return err
}
// Remove file/manifest first
err = o.swift.c.ObjectDelete(o.swift.container, o.swift.root+o.remote)
if err != nil {
return err
}
// ...then segments if required
if isManifestFile {
err = o.removeSegments("")
if err != nil {
return err
}
}
return nil
return o.swift.c.ObjectDelete(o.swift.container, o.swift.root+o.remote)
}
// Check the interfaces are satisfied
var _ fs.Fs = &FsSwift{}
var _ fs.Copier = &FsSwift{}
var _ fs.Object = &FsObjectSwift{}

View File

@@ -1,56 +0,0 @@
// Test Swift filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package swift_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/swift"
)
func init() {
fstests.NilObject = fs.Object((*swift.FsObjectSwift)(nil))
fstests.RemoteName = "TestSwift:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }