Mirror of https://github.com/rclone/rclone.git, synced 2026-01-06 10:33:34 +00:00
Compare commits
51 Commits
SHA1s of the 51 commits in this comparison:

34193fd8d9, 2203766f77, 235cbe0e57, f50f353b5d, 00afe6cc96, dd48e62b7e, a1a780e847, fa87077211, 6ac7145d2d, f1226f19b2, 3ecbf2af25, 79f2e95bf9, faee50b238, 807d4a3c00, 073d112204, 14f814b806, a288c2b3a3, fec16b0ac8, dd8717797e, 7e7c239f09, edd0e8abb1, d2b537d9a1, 8c3df224ef, 967fd2a778, ea12e446ca, c8cd2b510f, 8b05a8322b, c98a51b26c, e2717a031e, 8d33ce0154, 92745aa950, cbc6bf6a89, f72575e75f, 0168f55f3e, 8b60ab86a1, 7463a7a509, 9ed2de3d6e, 4f35fb59c8, 59ba8f28c8, d298b578ab, fabbc035c4, 6530b07cde, f8b7eaec93, 5c226e91c0, 8e3d45d2dc, a96b522958, fedf81c2b7, 0c6f816a49, dfe771fb0c, bc19e2d84b, 8c4d91cff7
.travis.yml

```diff
@@ -2,10 +2,9 @@ language: go
 sudo: false
 
 go:
-  - 1.2.2
-  - 1.3.3
-  - 1.4
   - tip
+  - 1.4.2
+  - 1.5
 
 script:
   - go get ./...
```
Makefile (2 lines changed)

```diff
@@ -66,4 +66,4 @@ retag:
 	git tag -f $(LAST_TAG)
 
 gen_tests:
-	cd fstest/fstests && go run gen_tests.go
+	cd fstest/fstests && go generate
```
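The `gen_tests` target now goes through `go generate`, which scans the package for `//go:generate` directives and runs the commands they name. For the two invocations to stay equivalent, a directive along these lines would have to live in one of the Go files in `fstest/fstests` (the exact file is an assumption, not shown in this diff):

```go
//go:generate go run gen_tests.go
```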
README.md

```diff
@@ -15,6 +15,7 @@ Rclone is a command line program to sync files and directories to and from
 * Openstack Swift / Rackspace cloud files / Memset Memstore
 * Dropbox
 * Google Cloud Storage
+* Amazon Cloud Drive
 * The local filesystem
 
 Features
```
Release instructions

```diff
@@ -12,7 +12,7 @@ Making a release
 * edit docs/content/changelog.md
 * git commit -a -v
 * make retag
-* # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross
+* # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross - not required for go >= 1.5
 * make cross
 * make upload
 * make upload_website
```
amazonclouddrive/amazonclouddrive.go (new file, 740 lines)

```go
// Amazon Cloud Drive interface
package amazonclouddrive

/*

FIXME make searching for directory in id and file in id more efficient
- use the name: search parameter - remember the escaping rules
- use Folder GetNode and GetFile

FIXME make the default for no files and no dirs be (FILE & FOLDER) so
we ignore assets completely!
*/

import (
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"net/url"
	"regexp"
	"strings"
	"sync"
	"time"

	"github.com/ncw/go-acd"
	"github.com/ncw/rclone/dircache"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/oauthutil"
	"github.com/ncw/rclone/pacer"
	"golang.org/x/oauth2"
)

const (
	rcloneClientID     = "amzn1.application-oa2-client.6bf18d2d1f5b485c94c8988bb03ad0e7"
	rcloneClientSecret = "k8/NyszKm5vEkZXAwsbGkd6C3NrbjIqMg4qEhIeF14Szub2wur+/teS3ubXgsLe9//+tr/qoqK+lq6mg8vWkoA=="
	folderKind         = "FOLDER"
	fileKind           = "FILE"
	assetKind          = "ASSET"
	statusAvailable    = "AVAILABLE"
	timeFormat         = time.RFC3339 // 2014-03-07T22:31:12.173Z
	minSleep           = 20 * time.Millisecond
	maxSleep           = 15 * time.Second
	decayConstant      = 2 // bigger for slower decay, exponential
)

// Globals
var (
	// Description of how to auth for this app
	acdConfig = &oauth2.Config{
		Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://www.amazon.com/ap/oa",
			TokenURL: "https://api.amazon.com/auth/o2/token",
		},
		ClientID:     rcloneClientID,
		ClientSecret: fs.Reveal(rcloneClientSecret),
		RedirectURL:  oauthutil.RedirectURL,
	}
)

// Register with Fs
func init() {
	fs.Register(&fs.FsInfo{
		Name:  "amazon cloud drive",
		NewFs: NewFs,
		Config: func(name string) {
			err := oauthutil.Config(name, acdConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: "client_id",
			Help: "Amazon Application Client Id - leave blank to use rclone's.",
		}, {
			Name: "client_secret",
			Help: "Amazon Application Client Secret - leave blank to use rclone's.",
		}},
	})
}

// FsAcd represents a remote acd server
type FsAcd struct {
	name       string             // name of this remote
	c          *acd.Client        // the connection to the acd server
	root       string             // the path we are working on
	dirCache   *dircache.DirCache // Map of directory path to directory id
	pacer      *pacer.Pacer       // pacer for API calls
	connTokens chan struct{}      // Connection tokens for directory listings
}

// FsObjectAcd describes an acd object
//
// Will definitely have info but maybe not meta
type FsObjectAcd struct {
	acd    *FsAcd    // what this object is part of
	remote string    // The remote path
	info   *acd.Node // Info from the acd object if known
}

// ------------------------------------------------------------

// Name returns the name of the remote (as passed into NewFs)
func (f *FsAcd) Name() string {
	return f.name
}

// Root returns the root of the remote (as passed into NewFs)
func (f *FsAcd) Root() string {
	return f.root
}

// String converts this FsAcd to a string
func (f *FsAcd) String() string {
	return fmt.Sprintf("Amazon cloud drive root '%s'", f.root)
}

// Pattern to match an acd path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// parsePath parses an acd 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	409, // Conflict - happens in the unit tests a lot
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
	// See if HTTP error code is to be retried
	if err != nil && resp != nil {
		for _, e := range retryErrorCodes {
			if resp.StatusCode == e {
				return true, err
			}
		}
	}

	// Allow retry if request times out. Adapted from
	// http://stackoverflow.com/questions/23494950/specifically-check-for-timeout-error
	switch err := err.(type) {
	case *url.Error:
		if err, ok := err.Err.(net.Error); ok && err.Timeout() {
			return true, err
		}
	case net.Error:
		if err.Timeout() {
			return true, err
		}
	}
	return false, err
}

// NewFs constructs an FsAcd from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
	root = parsePath(root)
	oAuthClient, err := oauthutil.NewClient(name, acdConfig)
	if err != nil {
		log.Fatalf("Failed to configure amazon cloud drive: %v", err)
	}

	c := acd.NewClient(oAuthClient)
	c.UserAgent = fs.UserAgent
	f := &FsAcd{
		name:       name,
		root:       root,
		c:          c,
		pacer:      pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
		connTokens: make(chan struct{}, fs.Config.Checkers),
	}

	// Insert connection tokens.
	for i := 0; i < fs.Config.Checkers; i++ {
		f.connTokens <- struct{}{}
	}

	// Update endpoints
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		_, resp, err = f.c.Account.GetEndpoints()
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("Failed to get endpoints: %v", err)
	}

	// Get rootID
	var rootInfo *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
		rootInfo, resp, err = f.c.Nodes.GetRoot()
		return shouldRetry(resp, err)
	})
	if err != nil || rootInfo.Id == nil {
		return nil, fmt.Errorf("Failed to get root: %v", err)
	}

	f.dirCache = dircache.New(root, *rootInfo.Id, f)

	// Find the current root
	err = f.dirCache.FindRoot(false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		newF := *f
		newF.dirCache = dircache.New(newRoot, *rootInfo.Id, &newF)
		newF.root = newRoot
		// Make new Fs which is the parent
		err = newF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		obj := newF.newFsObjectWithInfo(remote, nil)
		if obj == nil {
			// File doesn't exist so return old f
			return f, nil
		}
		// return an Fs Limited to this object
		return fs.NewLimited(&newF, obj), nil
	}
	return f, nil
}

// newFsObjectWithInfo returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsAcd) newFsObjectWithInfo(remote string, info *acd.Node) fs.Object {
	o := &FsObjectAcd{
		acd:    f,
		remote: remote,
	}
	if info != nil {
		// Set info but not meta
		o.info = info
	} else {
		err := o.readMetaData() // reads info and meta, returning an error
		if err != nil {
			// logged already FsDebug("Failed to read info: %s", err)
			return nil
		}
	}
	return o
}

// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsAcd) NewFsObject(remote string) fs.Object {
	return f.newFsObjectWithInfo(remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathId
func (f *FsAcd) FindLeaf(pathId, leaf string) (pathIdOut string, found bool, err error) {
	//fs.Debug(f, "FindLeaf(%q, %q)", pathId, leaf)
	folder := acd.FolderFromId(pathId, f.c.Nodes)
	var resp *http.Response
	var subFolder *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
		subFolder, resp, err = folder.GetFolder(leaf)
		return shouldRetry(resp, err)
	})
	if err != nil {
		if err == acd.ErrorNodeNotFound {
			//fs.Debug(f, "...Not found")
			return "", false, nil
		}
		//fs.Debug(f, "...Error %v", err)
		return "", false, err
	}
	if subFolder.Status != nil && *subFolder.Status != statusAvailable {
		fs.Debug(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status)
		time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
		return "", false, nil
	}
	//fs.Debug(f, "...Found(%q, %v)", *subFolder.Id, leaf)
	return *subFolder.Id, true, nil
}

// CreateDir makes a directory with pathId as parent and name leaf
func (f *FsAcd) CreateDir(pathId, leaf string) (newId string, err error) {
	//fmt.Printf("CreateDir(%q, %q)\n", pathId, leaf)
	folder := acd.FolderFromId(pathId, f.c.Nodes)
	var resp *http.Response
	var info *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
		info, resp, err = folder.CreateFolder(leaf)
		return shouldRetry(resp, err)
	})
	if err != nil {
		//fmt.Printf("...Error %v\n", err)
		return "", err
	}
	//fmt.Printf("...Id %q\n", *info.Id)
	return *info.Id, nil
}

// listAllFn is a user function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*acd.Node) bool

// listAll lists the directory required, calling the user function on
// each item found
//
// If directoriesOnly is set it only sends directories; if filesOnly
// is set it only sends files
//
// If the user fn ever returns true then it early exits with found = true
func (f *FsAcd) listAll(dirId string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	query := "parents:" + dirId
	if directoriesOnly {
		query += " AND kind:" + folderKind
	} else if filesOnly {
		query += " AND kind:" + fileKind
	} else {
		// FIXME none of these work
		//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
		//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
	}
	opts := acd.NodeListOptions{
		Filters: query,
	}
	var nodes []*acd.Node
OUTER:
	for {
		var resp *http.Response
		// Get a connection token
		<-f.connTokens
		err = f.pacer.Call(func() (bool, error) {
			nodes, resp, err = f.c.Nodes.GetNodes(&opts)
			return shouldRetry(resp, err)
		})
		// Reinsert the connection token
		f.connTokens <- struct{}{}
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't list files: %v", err)
			break
		}
		if nodes == nil {
			break
		}
		for _, node := range nodes {
			if node.Name != nil && node.Id != nil && node.Kind != nil && node.Status != nil {
				// Ignore nodes if not AVAILABLE
				if *node.Status != statusAvailable {
					continue
				}
				if fn(node) {
					found = true
					break OUTER
				}
			}
		}
	}
	return
}

// listDirRecursive lists the directory using a recursive list from
// the root
//
// Path should be a directory path either "" or "path/"
//
// This fetches the minimum amount of stuff but does more API calls
// which makes it slow
func (f *FsAcd) listDirRecursive(dirId string, path string, out fs.ObjectsChan) error {
	var subError error
	// Make the API request
	var wg sync.WaitGroup
	_, err := f.listAll(dirId, "", false, false, func(node *acd.Node) bool {
		// Recurse on directories
		switch *node.Kind {
		case folderKind:
			wg.Add(1)
			folder := path + *node.Name + "/"
			fs.Debug(f, "Reading %s", folder)
			go func() {
				defer wg.Done()
				err := f.listDirRecursive(*node.Id, folder, out)
				if err != nil {
					subError = err
					fs.ErrorLog(f, "Error reading %s:%s", folder, err)
				}
			}()
			return false
		case fileKind:
			if fs := f.newFsObjectWithInfo(path+*node.Name, node); fs != nil {
				out <- fs
			}
		default:
			// ignore ASSET etc
		}
		return false
	})
	wg.Wait()
	fs.Debug(f, "Finished reading %s", path)
	if err != nil {
		return err
	}
	if subError != nil {
		return subError
	}
	return nil
}

// List walks the path returning a channel of FsObjects
func (f *FsAcd) List() fs.ObjectsChan {
	out := make(fs.ObjectsChan, fs.Config.Checkers)
	go func() {
		defer close(out)
		err := f.dirCache.FindRoot(false)
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't find root: %s", err)
		} else {
			err = f.listDirRecursive(f.dirCache.RootID(), "", out)
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "List failed: %s", err)
			}
		}
	}()
	return out
}

// ListDir lists the directories
func (f *FsAcd) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
	go func() {
		defer close(out)
		err := f.dirCache.FindRoot(false)
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't find root: %s", err)
		} else {
			_, err := f.listAll(f.dirCache.RootID(), "", true, false, func(item *acd.Node) bool {
				dir := &fs.Dir{
					Name:  *item.Name,
					Bytes: -1,
					Count: -1,
				}
				dir.When, _ = time.Parse(timeFormat, *item.ModifiedDate)
				out <- dir
				return false
			})
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "ListDir failed: %s", err)
			}
		}
	}()
	return out
}

// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsAcd) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	// Temporary FsObject under construction
	o := &FsObjectAcd{
		acd:    f,
		remote: remote,
	}
	leaf, directoryID, err := f.dirCache.FindPath(remote, true)
	if err != nil {
		return nil, err
	}
	folder := acd.FolderFromId(directoryID, o.acd.c.Nodes)
	var info *acd.File
	var resp *http.Response
	err = f.pacer.CallNoRetry(func() (bool, error) {
		// Use the sized upload when the size is known, matching Update below
		if size != 0 {
			info, resp, err = folder.PutSized(in, size, leaf)
		} else {
			info, resp, err = folder.Put(in, leaf)
		}
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	o.info = info.Node
	return o, nil
}

// Mkdir creates the container if it doesn't exist
func (f *FsAcd) Mkdir() error {
	return f.dirCache.FindRoot(true)
}

// purgeCheck removes the root directory. If check is set then it
// refuses to do so if the directory has anything in it.
func (f *FsAcd) purgeCheck(check bool) error {
	if f.root == "" {
		return fmt.Errorf("Can't purge root directory")
	}
	dc := f.dirCache
	err := dc.FindRoot(false)
	if err != nil {
		return err
	}
	rootID := dc.RootID()

	if check {
		// check directory is empty
		empty := true
		_, err := f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
			switch *node.Kind {
			case folderKind:
				empty = false
				return true
			case fileKind:
				empty = false
				return true
			default:
				fs.Debug(f, "Found ASSET %s", *node.Id)
			}
			return false
		})
		if err != nil {
			return err
		}
		if !empty {
			return fmt.Errorf("Directory not empty")
		}
	}

	node := acd.NodeFromId(rootID, f.c.Nodes)
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = node.Trash()
		return shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}

	f.dirCache.ResetRoot()
	return nil
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *FsAcd) Rmdir() error {
	return f.purgeCheck(true)
}

// Precision returns the precision of the remote
func (f *FsAcd) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
//func (f *FsAcd) Copy(src fs.Object, remote string) (fs.Object, error) {
//	srcObj, ok := src.(*FsObjectAcd)
//	if !ok {
//		fs.Debug(src, "Can't copy - not same remote type")
//		return nil, fs.ErrorCantCopy
//	}
//	srcFs := srcObj.acd
//	_, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
//	if err != nil {
//		return nil, err
//	}
//	return f.NewFsObject(remote), nil
//}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsAcd) Purge() error {
	return f.purgeCheck(false)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *FsObjectAcd) Fs() fs.Fs {
	return o.acd
}

// String returns a string version of the object
func (o *FsObjectAcd) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *FsObjectAcd) Remote() string {
	return o.remote
}

// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectAcd) Md5sum() (string, error) {
	if o.info.ContentProperties.Md5 != nil {
		return *o.info.ContentProperties.Md5, nil
	}
	return "", nil
}

// Size returns the size of an object in bytes
func (o *FsObjectAcd) Size() int64 {
	return int64(*o.info.ContentProperties.Size)
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *FsObjectAcd) readMetaData() (err error) {
	if o.info != nil {
		return nil
	}
	leaf, directoryID, err := o.acd.dirCache.FindPath(o.remote, false)
	if err != nil {
		return err
	}
	folder := acd.FolderFromId(directoryID, o.acd.c.Nodes)
	var resp *http.Response
	var info *acd.File
	err = o.acd.pacer.Call(func() (bool, error) {
		info, resp, err = folder.GetFile(leaf)
		return shouldRetry(resp, err)
	})
	if err != nil {
		fs.Debug(o, "Failed to read info: %s", err)
		return err
	}
	o.info = info.Node
	return nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectAcd) ModTime() time.Time {
	err := o.readMetaData()
	if err != nil {
		fs.Log(o, "Failed to read metadata: %s", err)
		return time.Now()
	}
	modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate)
	if err != nil {
		fs.Log(o, "Failed to read mtime from object: %s", err)
		return time.Now()
	}
	return modTime
}

// SetModTime sets the modification time of the local fs object
func (o *FsObjectAcd) SetModTime(modTime time.Time) {
	// FIXME not implemented
}

// Storable returns whether this object is storable
func (o *FsObjectAcd) Storable() bool {
	return true
}

// Open an object for read
func (o *FsObjectAcd) Open() (in io.ReadCloser, err error) {
	file := acd.File{Node: o.info}
	var resp *http.Response
	err = o.acd.pacer.Call(func() (bool, error) {
		in, resp, err = file.Open()
		return shouldRetry(resp, err)
	})
	return in, err
}

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectAcd) Update(in io.Reader, modTime time.Time, size int64) error {
	file := acd.File{Node: o.info}
	var info *acd.File
	var resp *http.Response
	var err error
	err = o.acd.pacer.CallNoRetry(func() (bool, error) {
		if size != 0 {
			info, resp, err = file.OverwriteSized(in, size)
		} else {
			info, resp, err = file.Overwrite(in)
		}
		return shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}
	o.info = info.Node
	return nil
}

// Remove an object
func (o *FsObjectAcd) Remove() error {
	var resp *http.Response
	var err error
	err = o.acd.pacer.Call(func() (bool, error) {
		resp, err = o.info.Trash()
		return shouldRetry(resp, err)
	})
	return err
}

// Check the interfaces are satisfied
var (
	_ fs.Fs     = (*FsAcd)(nil)
	_ fs.Purger = (*FsAcd)(nil)
	// _ fs.Copier = (*FsAcd)(nil)
	// _ fs.Mover = (*FsAcd)(nil)
	// _ fs.DirMover = (*FsAcd)(nil)
	_ fs.Object = (*FsObjectAcd)(nil)
)
```
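Every API call in this backend goes through the same wrapper: `pacer.Call` runs a closure with rate-limited, exponentially backed-off retries, and `shouldRetry` decides from the HTTP status or a timeout whether another attempt is worthwhile. The following is a minimal standalone sketch of that pattern, not rclone's actual `pacer` package; the closure contents and the simulated 429 responses are illustrative assumptions:

```go
// Standalone sketch of the pacer.Call + shouldRetry pattern used above.
package main

import (
	"fmt"
	"net/http"
	"time"
)

// retryable mirrors retryErrorCodes in the backend above.
var retryable = map[int]bool{429: true, 500: true, 409: true}

// call retries fn with exponential backoff between 20ms and 15s,
// the same bounds as minSleep/maxSleep in the backend.
func call(fn func() (*http.Response, error)) error {
	sleep := 20 * time.Millisecond
	for {
		resp, err := fn()
		if err == nil {
			return nil
		}
		if resp == nil || !retryable[resp.StatusCode] {
			return err // not retryable, give up
		}
		time.Sleep(sleep)
		if sleep *= 2; sleep > 15*time.Second {
			sleep = 15 * time.Second
		}
	}
}

func main() {
	attempts := 0
	err := call(func() (*http.Response, error) {
		attempts++
		if attempts < 3 {
			// Simulate a rate-limit response from the server.
			return &http.Response{StatusCode: 429}, fmt.Errorf("429 rate exceeded")
		}
		return &http.Response{StatusCode: 200}, nil
	})
	fmt.Println(err, "after", attempts, "attempts") // <nil> after 3 attempts
}
```

The `connTokens` channel in `listAll` is the complementary half of this: a buffered channel used as a counting semaphore so that at most `fs.Config.Checkers` directory listings are in flight at once.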
amazonclouddrive/amazonclouddrive_test.go (new file, 56 lines)

```go
// Test AmazonCloudDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package amazonclouddrive_test

import (
	"testing"

	"github.com/ncw/rclone/amazonclouddrive"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func init() {
	fstests.NilObject = fs.Object((*amazonclouddrive.FsObjectAcd)(nil))
	fstests.RemoteName = "TestAmazonCloudDrive:"
}

// Generic tests for the Fs
func TestInit(t *testing.T)                 { fstests.TestInit(t) }
func TestFsString(t *testing.T)             { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T)         { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T)      { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T)              { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T)          { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T)       { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T)           { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T)           { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T)       { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T)        { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T)           { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T)          { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T)        { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T)      { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T)               { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T)               { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T)            { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T)          { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T)          { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T)         { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T)             { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T)         { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T)         { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T)        { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T)     { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T)           { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T)           { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T)         { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T)       { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T)            { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T)    { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T)         { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T)          { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T)             { fstests.TestFinalise(t) }
```
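These generated tests exercise a real remote, so they need a configured remote whose name matches `fstests.RemoteName`. A typical invocation, assuming a `TestAmazonCloudDrive:` remote has already been set up with `rclone config`, would be

    go test -v ./amazonclouddrive/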
Cross-compile script

```diff
@@ -3,7 +3,7 @@
 set -e
 
 # This uses gox from https://github.com/mitchellh/gox
-# Make sure you've run gox -build-toolchain
+# Make sure you've run gox -build-toolchain - not required for go >= 1.5
 
 if [ "$1" == "" ]; then
 	echo "Syntax: $0 Version"
@@ -13,7 +13,9 @@ VERSION="$1"
 
 rm -rf build
 
-gox -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}"
+gox -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}" -os "darwin linux freebsd openbsd windows freebsd netbsd plan9 solaris"
+# Not implemented yet: nacl dragonfly android
+# gox -osarch-list for definitive list
 
 mv build/rclone-${VERSION}-darwin-amd64 build/rclone-${VERSION}-osx-amd64
 mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
@@ -25,6 +27,8 @@ for d in `ls`; do
 	cp -a ../MANUAL.html $d/README.html
 	cp -a ../rclone.1 $d/
 	zip -r9 $d.zip $d
+	d_current=${d/-${VERSION}/-current}
+	ln $d.zip $d_current.zip
 	rm -rf $d
 done
```
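The two added lines are what give each release a stable "current" download URL: the zip is hard-linked under a second name with the version replaced by `current`, using bash pattern substitution. For example, with a concrete version:

    d=rclone-v1.20-linux-amd64
    echo "${d/-v1.20/-current}"   # prints rclone-current-linux-amd64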
dircache/dircache.go (new file, 283 lines)

```go
// Package dircache provides a simple cache for caching directory to
// path lookups
package dircache

// _methods are called without the lock

import (
	"fmt"
	"log"
	"strings"
	"sync"
)

// DirCache caches paths to directory IDs and vice versa
type DirCache struct {
	mu           sync.RWMutex
	cache        map[string]string
	invCache     map[string]string
	fs           DirCacher // Interface to find and make stuff
	trueRootID   string    // ID of the absolute root
	root         string    // the path we are working on
	rootID       string    // ID of the root directory
	rootParentID string    // ID of the root's parent directory
	foundRoot    bool      // Whether we have found the root or not
}

// DirCacher describes an interface for doing the low level directory work
type DirCacher interface {
	FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error)
	CreateDir(pathID, leaf string) (newID string, err error)
}

// New makes a DirCache
//
// The cache is safe for concurrent use
func New(root string, trueRootID string, fs DirCacher) *DirCache {
	d := &DirCache{
		trueRootID: trueRootID,
		root:       root,
		fs:         fs,
	}
	d.Flush()
	d.ResetRoot()
	return d
}

// _get an ID given a path - without lock
func (dc *DirCache) _get(path string) (id string, ok bool) {
	id, ok = dc.cache[path]
	return
}

// Get an ID given a path
func (dc *DirCache) Get(path string) (id string, ok bool) {
	dc.mu.RLock()
	id, ok = dc._get(path)
	dc.mu.RUnlock()
	return
}

// GetInv gets a path given an ID
func (dc *DirCache) GetInv(id string) (path string, ok bool) {
	dc.mu.RLock()
	path, ok = dc.invCache[id]
	dc.mu.RUnlock()
	return
}

// _put a path, id into the map without lock
func (dc *DirCache) _put(path, id string) {
	dc.cache[path] = id
	dc.invCache[id] = path
}

// Put a path, id into the map
func (dc *DirCache) Put(path, id string) {
	dc.mu.Lock()
	dc._put(path, id)
	dc.mu.Unlock()
}

// _flush the map of all data without lock
func (dc *DirCache) _flush() {
	dc.cache = make(map[string]string)
	dc.invCache = make(map[string]string)
}

// Flush the map of all data
func (dc *DirCache) Flush() {
	dc.mu.Lock()
	dc._flush()
	dc.mu.Unlock()
}

// SplitPath splits a path into directory, leaf
//
// Path shouldn't start or end with a /
//
// If there are no slashes then directory will be "" and leaf = path
func SplitPath(path string) (directory, leaf string) {
	lastSlash := strings.LastIndex(path, "/")
	if lastSlash >= 0 {
		directory = path[:lastSlash]
		leaf = path[lastSlash+1:]
	} else {
		directory = ""
		leaf = path
	}
	return
}

// FindDir finds the directory passed in, returning the directory ID
// starting from pathID
//
// Path shouldn't start or end with a /
//
// If create is set it will make the directory if not found
//
// Algorithm:
//  Look in the cache for the path, if found return the pathID
//  If not found strip the last path off the path and recurse
//  Now have a parent directory id, so look in the parent for self and return it
func (dc *DirCache) FindDir(path string, create bool) (pathID string, err error) {
	dc.mu.Lock() // _findDir may write to the cache so take the full lock
	defer dc.mu.Unlock()
	return dc._findDir(path, create)
}

// Look for the root and in the cache - safe to call without the mu
func (dc *DirCache) _findDirInCache(path string) string {
	// fmt.Println("Finding", path, "in cache", dc.cache)
	// If it is the root, then return it
	if path == "" {
		// fmt.Println("Root")
		return dc.rootID
	}

	// If it is in the cache then return it
	pathID, ok := dc._get(path)
	if ok {
		// fmt.Println("Cache hit on", path)
		return pathID
	}

	return ""
}

// Unlocked findDir - must have mu
func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error) {
	// if !dc.foundRoot {
	// 	return "", fmt.Errorf("FindDir called before FindRoot")
	// }

	pathID = dc._findDirInCache(path)
	if pathID != "" {
		return pathID, nil
	}

	// Split the path into directory, leaf
	directory, leaf := SplitPath(path)

	// Recurse and find pathID for parent directory
	parentPathID, err := dc._findDir(directory, create)
	if err != nil {
		return "", err
	}

	// Find the leaf in parentPathID
	pathID, found, err := dc.fs.FindLeaf(parentPathID, leaf)
	if err != nil {
		return "", err
	}

	// If not found create the directory if required or return an error
	if !found {
		if create {
			pathID, err = dc.fs.CreateDir(parentPathID, leaf)
			if err != nil {
				return "", fmt.Errorf("Failed to make directory: %v", err)
			}
		} else {
			return "", fmt.Errorf("Couldn't find directory: %q", path)
		}
	}

	// Store the leaf directory in the cache
	dc._put(path, pathID)

	// fmt.Println("Dir", path, "is", pathID)
	return pathID, nil
}

// FindPath finds the leaf and directoryID from a path
//
// If create is set parent directories will be created if they don't exist
func (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string, err error) {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	directory, leaf := SplitPath(path)
	directoryID, err = dc._findDir(directory, create)
	if err != nil {
		if create {
			err = fmt.Errorf("Couldn't find or make directory %q: %s", directory, err)
		} else {
			err = fmt.Errorf("Couldn't find directory %q: %s", directory, err)
		}
	}
	return
}

// FindRoot finds the root directory if not already found
//
// If create is set it will make the directory if not found
func (dc *DirCache) FindRoot(create bool) error {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	if dc.foundRoot {
		return nil
	}
	dc.foundRoot = true
	rootID, err := dc._findDir(dc.root, create)
	if err != nil {
		dc.foundRoot = false
		return err
	}
	dc.rootID = rootID

	// Find the parent of the root while we still have the root
	// directory tree cached
	rootParentPath, _ := SplitPath(dc.root)
	dc.rootParentID, _ = dc._get(rootParentPath)

	// Reset the tree based on dc.root
	dc._flush()
	// Put the root directory in
	dc._put("", dc.rootID)
	return nil
}

// RootID returns the ID of the root directory
//
// This should be called after FindRoot
func (dc *DirCache) RootID() string {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	if !dc.foundRoot {
		log.Fatalf("Internal Error: RootID() called before FindRoot")
	}
	return dc.rootID
}

// RootParentID returns the ID of the parent of the root directory
//
// This should be called after FindRoot
func (dc *DirCache) RootParentID() (string, error) {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	if !dc.foundRoot {
		return "", fmt.Errorf("Internal Error: RootParentID() called before FindRoot")
	}
	if dc.rootParentID == "" {
		return "", fmt.Errorf("Internal Error: Didn't find rootParentID")
	}
	if dc.rootID == dc.trueRootID {
		return "", fmt.Errorf("Is root directory")
	}
	return dc.rootParentID, nil
}

// ResetRoot resets the root directory to the absolute root and clears
// the DirCache
func (dc *DirCache) ResetRoot() {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	dc.foundRoot = false
	dc._flush()

	// Put the true root in
	dc.rootID = dc.trueRootID

	// Put the root directory in
	dc._put("", dc.rootID)
}
```
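Any backend gets path-to-ID resolution by implementing the two-method `DirCacher` interface, as the Amazon Cloud Drive backend above does with `FindLeaf` and `CreateDir`. A minimal sketch of wiring one up; `memFs` is a hypothetical in-memory stand-in for a real remote, used only to show the call sequence:

```go
// Sketch of plugging a backend into dircache.
package main

import (
	"fmt"

	"github.com/ncw/rclone/dircache"
)

// memFs implements dircache.DirCacher over an in-memory tree.
type memFs struct {
	next     int
	children map[string]map[string]string // parent ID -> leaf -> child ID
}

func (m *memFs) FindLeaf(pathID, leaf string) (string, bool, error) {
	id, ok := m.children[pathID][leaf]
	return id, ok, nil
}

func (m *memFs) CreateDir(pathID, leaf string) (string, error) {
	m.next++
	id := fmt.Sprintf("id-%d", m.next)
	if m.children[pathID] == nil {
		m.children[pathID] = map[string]string{}
	}
	m.children[pathID][leaf] = id
	return id, nil
}

func main() {
	m := &memFs{children: map[string]map[string]string{}}
	dc := dircache.New("backup", "root-id", m) // work relative to "backup"
	if err := dc.FindRoot(true); err != nil {  // creates root-id/backup
		panic(err)
	}
	// Resolves (and here creates) photos/2015, caching each level.
	leaf, dirID, err := dc.FindPath("photos/2015/img.jpg", true)
	fmt.Println(leaf, dirID, err) // img.jpg id-3 <nil>
}
```

Each directory level is looked up through `FindLeaf` at most once and then served from the cache, which is what keeps the recursive listing in the backend from hammering the API for repeated path lookups.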
docs/content/about.md

```diff
@@ -1,8 +1,8 @@
 ---
 title: "Rclone"
-description: "rclone syncs files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox and Google Cloud Storage."
+description: "rclone syncs files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox, Google Cloud Storage and Amazon Cloud Drive."
 type: page
-date: "2014-07-17"
+date: "2015-09-06"
 groups: ["about"]
 ---
 
@@ -18,6 +18,7 @@ Rclone is a command line program to sync files and directories to and from
 * Openstack Swift / Rackspace cloud files / Memset Memstore
 * Dropbox
 * Google Cloud Storage
+* Amazon Cloud Drive
 * The local filesystem
 
 Features
```
docs/content/amazonclouddrive.md (new file, 104 lines)

````markdown
---
title: "Amazon Cloud Drive"
description: "Rclone docs for Amazon Cloud Drive"
date: "2015-09-06"
---

<i class="fa fa-google"></i> Amazon Cloud Drive
-----------------------------------------

Paths are specified as `remote:path`

Paths may be as deep as required, eg `remote:directory/subdirectory`.

The initial setup for Amazon cloud drive involves getting a token from
Amazon which you need to do in your browser. `rclone config` walks
you through it.

Here is an example of how to make a remote called `remote`. First run:

    rclone config

This will guide you through an interactive setup process:

```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
 1) amazon cloud drive
 2) drive
 3) dropbox
 4) google cloud storage
 5) local
 6) s3
 7) swift
type> 1
Amazon Application Client Id - leave blank to use rclone's.
client_id>
Amazon Application Client Secret - leave blank to use rclone's.
client_secret>
Remote config
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
client_id =
client_secret =
token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxx","token_type":"bearer","refresh_token":"xxxxxxxxxxxxxxxxxx","expiry":"2015-09-06T16:07:39.658438471+01:00"}
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```

Note that rclone runs a webserver on your local machine to collect the
token as returned from Amazon. This only runs from the moment it
opens your browser to the moment you get back the verification
code. This is on `http://127.0.0.1:53682/` and it may require
you to unblock it temporarily if you are running a host firewall.

Once configured you can then use `rclone` like this,

List directories in top level of your Amazon cloud drive

    rclone lsd remote:

List all the files in your Amazon cloud drive

    rclone ls remote:

To copy a local directory to an Amazon cloud drive directory called backup

    rclone copy /home/source remote:backup

### Modified time and MD5SUMs ###

Amazon cloud drive doesn't allow modification times to be changed via
the API so these won't be accurate or used for syncing.

It does store MD5SUMs so for a more accurate sync, you can use the
`--checksum` flag.

### Deleting files ###

Any files you delete with rclone will end up in the trash. Amazon
don't provide an API to permanently delete files, nor to empty the
trash, so you will have to do that with one of Amazon's apps or via
the Amazon cloud drive website.

### Limitations ###

Note that Amazon cloud drive is case insensitive so you can't have a
file called "Hello.doc" and one called "hello.doc".

Amazon cloud drive has rate limiting so you may notice errors in the
sync (429 errors). rclone will automatically retry the sync up to 3
times by default (see `--retries` flag) which should hopefully work
around this problem.
````
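Because modification times can't be set through the API, time-based sync comparisons are unreliable on this backend, and the MD5SUM support mentioned above is the dependable alternative. For example, to sync using checksums instead of modification times:

    rclone sync --checksum /home/source remote:backup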
Contributors list

```diff
@@ -15,3 +15,5 @@ Contributors
 * Alex Couper <amcouper@gmail.com>
 * Leonid Shalupov <leonid@shalupov.com>
 * Shimon Doodkin <helpmepro1@gmail.com>
+* Colin Nicholson <colin@colinn.com>
+* Klaus Post <klauspost@gmail.com>
```
docs/content/changelog.md

```diff
@@ -1,12 +1,39 @@
 ---
 title: "Documentation"
 description: "Rclone Changelog"
-date: "2015-08-17"
+date: "2015-09-15"
 ---
 
 Changelog
 ---------
 
+* v1.20 - 2015-09-15
+  * New features
+    * Amazon Cloud Drive support
+    * Oauth support redone - fix many bugs and improve usability
+      * Use "golang.org/x/oauth2" as oauth library of choice
+      * Improve oauth usability for smoother initial signup
+      * drive, googlecloudstorage: optionally use auto config for the oauth token
+    * Implement --dump-headers and --dump-bodies debug flags
+    * Show multiple matched commands if abbreviation too short
+    * Implement server side move where possible
+  * local
+    * Always use UNC paths internally on Windows - fixes a lot of bugs
+  * dropbox
+    * force use of our custom transport which makes timeouts work
+  * Thanks to Klaus Post for lots of help with this release
+* v1.19 - 2015-08-28
+  * New features
+    * Server side copies for s3/swift/drive/dropbox/gcs
+    * Move command - uses server side copies if it can
+    * Implement --retries flag - tries 3 times by default
+    * Build for plan9/amd64 and solaris/amd64 too
+  * Fixes
+    * Make a current version download with a fixed URL for scripting
+    * Ignore rmdir in limited fs rather than throwing error
+  * dropbox
+    * Increase chunk size to improve upload speeds massively
+    * Issue an error message when trying to upload bad file name
 * v1.18 - 2015-08-17
   * drive
     * Add `--drive-use-trash` flag so rclone trashes instead of deletes
```
docs/content/docs.md

```diff
@@ -23,7 +23,7 @@ See the following for detailed instructions for
 * [Amazon S3](/s3/)
 * [Swift / Rackspace Cloudfiles / Memset Memstore](/swift/)
 * [Dropbox](/dropbox/)
-* [Google Cloud Storage](/googlcloudstorage/)
+* [Google Cloud Storage](/googlecloudstorage/)
 * [Local filesystem](/local/)
 
 Usage
@@ -103,6 +103,37 @@ Enter an interactive configuration session.
 
 Prints help on rclone commands and options.
 
+Server Side Copy
+----------------
+
+Drive, S3, Dropbox, Swift and Google Cloud Storage support server side
+copy.
+
+This means if you want to copy one folder to another then rclone won't
+download all the files and re-upload them; it will instruct the server
+to copy them in place.
+
+Eg
+
+    rclone copy s3:oldbucket s3:newbucket
+
+Will copy the contents of `oldbucket` to `newbucket` without
+downloading and re-uploading.
+
+Remotes which don't support server side copy (eg local) **will**
+download and re-upload in this case.
+
+Server side copies are used with `sync` and `copy` and will be
+identified in the log when using the `-v` flag.
+
+Server side copies will only be attempted if the remote names are the
+same.
+
+This can be used when scripting to make aged backups efficiently, eg
+
+    rclone sync remote:current-backup remote:previous-backup
+    rclone sync /path/to/files remote:current-backup
+
 Options
 -------
 
```
docs/content/donate.md (new file, 30 lines)

```markdown
---
title: "Flowers for My Wife"
description: "Flowers for My Wife."
type: page
date: "2015-09-06"
---

Flowers for My Wife
===================

Rclone is a pure open source, for-love-not-money project. However I've
had requests for a donation page, and coding it does take me away from
something else I love - my wonderful wife.

So if you would like to send a donation, I will use it to buy flowers
for her, which will make her very happy.

<form action="https://www.paypal.com/cgi-bin/webscr" method="post" target="_top">
<input type="hidden" name="cmd" value="_s-xclick">
<input type="hidden" name="hosted_button_id" value="XQMMNUD5ZY49J">
<input type="image" src="https://www.paypalobjects.com/en_US/GB/i/btn/btn_donateCC_LG.gif" border="0" name="submit" alt="PayPal – The safer, easier way to pay online.">
<img alt="" border="0" src="https://www.paypalobjects.com/en_GB/i/scr/pixel.gif" width="1" height="1">
</form>

If you would prefer to express your gratitude by promoting the
project, or helping with it, I'd be over the moon with that too!

Thanks

Nick
```
docs/content/downloads.md

```diff
@@ -2,34 +2,73 @@
 title: "Rclone downloads"
 description: "Download rclone binaries for your OS."
 type: page
-date: "2015-08-17"
+date: "2015-09-15"
 ---
 
-Rclone Download v1.18
+Rclone Download v1.20
 =====================
 
 * Windows
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-windows-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-windows-amd64.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.20-windows-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.20-windows-amd64.zip)
 * OSX
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-osx-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-osx-amd64.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.20-osx-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.20-osx-amd64.zip)
 * Linux
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-linux-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-linux-amd64.zip)
-  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.18-linux-arm.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.20-linux-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.20-linux-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.20-linux-arm.zip)
 * FreeBSD
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-freebsd-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-freebsd-amd64.zip)
-  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.18-freebsd-arm.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.20-freebsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.20-freebsd-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.20-freebsd-arm.zip)
 * NetBSD
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-netbsd-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-netbsd-amd64.zip)
-  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.18-netbsd-arm.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.20-netbsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.20-netbsd-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.20-netbsd-arm.zip)
 * OpenBSD
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-openbsd-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-openbsd-amd64.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.20-openbsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.20-openbsd-amd64.zip)
 * Plan 9
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-plan9-386.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.20-plan9-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.20-plan9-amd64.zip)
+* Solaris
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.20-solaris-amd64.zip)
 
+Downloads for scripting
+=======================
+
+If you would like to download the current version (maybe from a
+script) from a URL which doesn't change then you can use these links.
+
+* Windows
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-windows-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-windows-amd64.zip)
+* OSX
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-osx-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-osx-amd64.zip)
+* Linux
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-linux-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-linux-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-linux-arm.zip)
+* FreeBSD
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-freebsd-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-arm.zip)
+* NetBSD
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-netbsd-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-arm.zip)
+* OpenBSD
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-openbsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-openbsd-amd64.zip)
+* Plan 9
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-plan9-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-plan9-amd64.zip)
+* Solaris
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-solaris-amd64.zip)
 
 Older Downloads
 ==============
 
 Older downloads can be found [here](http://downloads.rclone.org/)
```
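These fixed `-current` URLs are what the hard-link step in the cross-compile script produces, and they make unattended fetches straightforward, e.g.:

    curl -O http://downloads.rclone.org/rclone-current-linux-amd64.zip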
Downloads page template

```diff
@@ -31,5 +31,44 @@ Rclone Download VERSION
   * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-openbsd-amd64.zip)
 * Plan 9
   * [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-plan9-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-plan9-amd64.zip)
+* Solaris
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-solaris-amd64.zip)
 
+Downloads for scripting
+=======================
+
+If you would like to download the current version (maybe from a
+script) from a URL which doesn't change then you can use these links.
+
+* Windows
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-windows-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-windows-amd64.zip)
+* OSX
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-osx-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-osx-amd64.zip)
+* Linux
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-linux-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-linux-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-linux-arm.zip)
+* FreeBSD
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-freebsd-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-arm.zip)
+* NetBSD
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-netbsd-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-arm.zip)
+* OpenBSD
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-openbsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-openbsd-amd64.zip)
+* Plan 9
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-plan9-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-plan9-amd64.zip)
+* Solaris
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-solaris-amd64.zip)
+
 Older Downloads
 ==============
 
 Older downloads can be found [here](http://downloads.rclone.org/)
```
docs/content/drive.md

````diff
@@ -1,7 +1,7 @@
 ---
 title: "Google drive"
 description: "Rclone docs for Google drive"
-date: "2015-05-10"
+date: "2015-09-12"
 ---
 
 <i class="fa fa-google"></i> Google Drive
@@ -39,10 +39,16 @@ client_id>
 Google Application Client Secret - leave blank to use rclone's.
 client_secret>
 Remote config
-Go to the following link in your browser
-https://accounts.google.com/o/oauth2/auth?access_type=&approval_prompt=&client_id=XXXXXXXXXXXX.apps.googleusercontent.com&redirect_uri=urn%3XXXXX%3Awg%3Aoauth%3XX.0%3Aoob&response_type=code&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&state=state
-Log in, then type paste the token that is returned in the browser here
-Enter verification code> X/XXXXXXXXXXXXXXXXXX-XXXXXXXXX.XXXXXXXXX-XXXXX_XXXXXXX_XXXXXXX
+Use auto config?
+ * Say Y if not sure
+ * Say N if you are working on a remote or headless machine
+y) Yes
+n) No
+y/n> y
+If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
+Log in and authorize rclone for access
+Waiting for code...
+Got code
 --------------------
 [remote]
 client_id =
@@ -55,6 +61,13 @@ d) Delete this remote
 y/e/d> y
 ```
 
+Note that rclone runs a webserver on your local machine to collect the
+token as returned from Google if you use auto config mode. This only
+runs from the moment it opens your browser to the moment you get back
+the verification code. This is on `http://127.0.0.1:53682/` and it
+may require you to unblock it temporarily if you are running a host
+firewall, or use manual mode.
+
 You can then use it like this,
 
 List directories in top level of your drive
````
@@ -80,3 +80,9 @@ MD5SUMs so syncs will effectively have the `--size-only` flag set.

Note that Dropbox is case sensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".

There are some file names such as `thumbs.db` which Dropbox can't
store. There is a full list of them in the ["Ignored Files" section
of this document](https://www.dropbox.com/en/help/145). Rclone will
issue an error message `File name disallowed - not uploading` if it
attempts to upload one of those file names, but the sync won't fail.

@@ -1,7 +1,7 @@
---
title: "FAQ"
description: "Rclone Frequently Asked Questions"
date: "2015-06-06"
date: "2015-08-27"
---

Frequently Asked Questions
@@ -13,6 +13,31 @@ Yes they do. All the rclone commands (eg `sync`, `copy` etc) will
work on all the remote storage systems.


### Can I copy the config from one machine to another ###

Sure! Rclone stores all of its config in a single file. If you want
to find this file, the simplest way is to run `rclone -h` and look at
the help for the `--config` flag which will tell you where it is. Eg,

```
$ rclone -h
Sync files and directories to and from local and remote object stores - v1.18.
[snip]
Options:
      --bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
      --checkers=8: Number of checkers to run in parallel.
  -c, --checksum=false: Skip based on checksum & size, not mod-time & size
      --config="/home/user/.rclone.conf": Config file.
[snip]
```

So in this config the config file can be found in
`/home/user/.rclone.conf`.

Just copy that to the equivalent place in the destination (run `rclone
-h` above again on the destination machine if not sure).


### Can rclone sync directly from drive to s3 ###

Rclone can sync between two remote cloud storage systems just fine.
@@ -48,3 +73,31 @@ Server B> rclone copy /tmp/whatever remote:Backup
The file names you upload from Server A and Server B should be
different in this case, otherwise some file systems (eg Drive) may
make duplicates.

### Why doesn't rclone support partial transfers / binary diffs like rsync? ###

Rclone stores each file you transfer as a native object on the remote
cloud storage system. This means that you can see the files you
upload as expected using alternative access methods (eg using the
Google Drive web interface). There is a 1:1 mapping between files on
your hard disk and objects created in the cloud storage system.

No cloud storage system I've come across yet supports partially
uploading an object. You can't take an existing object and change
some bytes in the middle of it.

It would be possible to make a sync system which stored binary diffs
instead of whole objects like rclone does, but that would break the
1:1 mapping of files on your hard disk to objects in the remote cloud
storage system.

All the cloud storage systems support partial downloads of content, so
it would be possible to make partial downloads work. However, to make
this work efficiently would require storing a significant amount of
metadata, which breaks the desired 1:1 mapping of files to objects.

### Can rclone do bi-directional sync? ###

No, not at present. rclone only does uni-directional sync from A ->
B. It may do in the future though since it has all the primitives - it
just requires writing the algorithm to do it.

@@ -1,7 +1,7 @@
---
title: "Google Cloud Storage"
description: "Rclone docs for Google Cloud Storage"
date: "2014-07-17"
date: "2015-09-12"
---

<i class="fa fa-google"></i> Google Cloud Storage
@@ -70,10 +70,17 @@ Choose a number from below, or type in your own value
 5) publicReadWrite
bucket_acl> 2
Remote config
Go to the following link in your browser
https://accounts.google.com/o/oauth2/auth?access_type=&approval_prompt=&client_id=XXXXXXXXXXXX.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&response_type=code&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdevstorage.full_control&state=state
Log in, then paste the token that is returned in the browser here
Enter verification code> x/xxxxxxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxxxxxxxxxx_xxxxxxxx
Remote config
Use auto config?
 * Say Y if not sure
 * Say N if you are working on a remote or headless machine
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
type = google cloud storage
@@ -90,6 +97,13 @@ d) Delete this remote
y/e/d> y
```

Note that rclone runs a webserver on your local machine to collect the
token as returned from Google if you use auto config mode. This only
runs from the moment it opens your browser to the moment you get back
the verification code. This is on `http://127.0.0.1:53682/` and you
may need to unblock it temporarily if you are running a host firewall,
or use manual mode.

This remote is called `remote` and can now be used like this

See all the buckets in your project

71
docs/content/overview.md
Normal file
@@ -0,0 +1,71 @@
---
title: "Overview of cloud storage systems"
description: "Overview of cloud storage systems"
type: page
date: "2015-09-06"
---

# Overview of cloud storage systems #

Each cloud storage system is slightly different. Rclone attempts to
provide a unified interface to them, but some underlying differences
show through.

## Features ##

Here is an overview of the major features of each cloud storage system.

| Name                 | MD5SUM | ModTime | Case Sensitive | Duplicate Files |
| -------------------- |:------:|:-------:|:--------------:|:---------------:|
| Google Drive         | Yes    | Yes     | No             | Yes             |
| Amazon S3            | Yes    | Yes     | No             | No              |
| Openstack Swift      | Yes    | Yes     | No             | No              |
| Dropbox              | No     | No      | Yes            | No              |
| Google Cloud Storage | Yes    | Yes     | No             | No              |
| Amazon Cloud Drive   | Yes    | No      | Yes            | No              |
| The local filesystem | Yes    | Yes     | Depends        | No              |

### MD5SUM ###

The cloud storage system supports MD5SUMs of the objects. This
is used if available when transferring data as an integrity check and
can be specifically used with the `--checksum` flag in syncs and in
the `check` command.

### ModTime ###

The cloud storage system supports setting modification times on
objects. If it does then this enables using the modification times
as part of the sync. If not then only the size will be checked by
default, though the MD5SUM can be checked with the `--checksum` flag.

All cloud storage systems support some kind of date on the object and
these will be set when transferring from the cloud storage system.

### Case Sensitive ###

If a cloud storage system is case sensitive then it is possible to
have two files which differ only in case, eg `file.txt` and
`FILE.txt`. If a cloud storage system is case insensitive then that
isn't possible.

This can cause problems when syncing between a case insensitive
system and a case sensitive system. The symptom of this is that no
matter how many times you run the sync it never completes fully.

The local filesystem may or may not be case sensitive depending on OS.

* Windows - usually case insensitive
* OSX - usually case insensitive, though it is possible to format case sensitive
* Linux - usually case sensitive, but there are case insensitive file systems (eg FAT formatted USB keys)

Most of the time this doesn't cause any problems as people tend to
avoid files whose name differs only by case even on case sensitive
systems.

### Duplicate files ###

If a cloud storage system allows duplicate files then it can have two
objects with the same name.

This confuses rclone greatly when syncing.
65
docs/content/privacy.md
Normal file
@@ -0,0 +1,65 @@
---
title: "Privacy Policy"
description: "Rclone Privacy Policy"
date: "2015-08-19"
---

# Rclone Privacy Policy #

## What is this Privacy Policy for? ##

This privacy policy is for this website http://rclone.org and governs the privacy of its users who choose to use it.

The policy sets out the different areas where user privacy is concerned and outlines the obligations & requirements of the users, the website and website owners. Furthermore the way this website processes, stores and protects user data and information will also be detailed within this policy.

## The Website ##

This website and its owners take a proactive approach to user privacy and ensure the necessary steps are taken to protect the privacy of its users throughout their visiting experience. This website complies with all UK national laws and requirements for user privacy.

## Use of Cookies ##

This website uses cookies to improve the user's experience while visiting the website. Where applicable this website uses a cookie control system allowing the user on their first visit to the website to allow or disallow the use of cookies on their computer / device. This complies with recent legislation requirements for websites to obtain explicit consent from users before leaving behind or reading files such as cookies on a user's computer / device.

Cookies are small files saved to the hard drive of the user's computer that track, save and store information about the user's interactions and usage of the website. This allows the website, through its server, to provide the users with a tailored experience within this website.

Users are advised that if they wish to deny the use and saving of cookies from this website onto their computer's hard drive they should take the necessary steps within their web browser's security settings to block all cookies from this website and its external serving vendors.

This website uses tracking software to monitor its visitors to better understand how they use it. This software is provided by Google Analytics which uses cookies to track visitor usage. The software will save a cookie to your computer's hard drive in order to track and monitor your engagement and usage of the website, but will not store, save or collect personal information. You can read [Google's privacy policy here](http://www.google.com/privacy.html) for further information.

Other cookies may be stored on your computer's hard drive by external vendors when this website uses referral programs, sponsored links or adverts. Such cookies are used for conversion and referral tracking and typically expire after 30 days, though some may take longer. No personal information is stored, saved or collected.

## Contact & Communication ##

Users contacting this website and/or its owners do so at their own discretion and provide any such personal details requested at their own risk. Your personal information is kept private and stored securely until a time it is no longer required or has no use, as detailed in the Data Protection Act 1998.

This website and its owners use any information submitted to provide you with further information about the products / services they offer or to assist you in answering any questions or queries you may have submitted.

## External Links ##

Although this website only looks to include quality, safe and relevant external links, users are advised to adopt a policy of caution before clicking any external web links mentioned throughout this website.

The owners of this website cannot guarantee or verify the contents of any externally linked website despite their best efforts. Users should therefore note they click on external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.

## Adverts and Sponsored Links ##

This website may contain sponsored links and adverts. These will typically be served through our advertising partners, who may have detailed privacy policies relating directly to the adverts they serve.

Clicking on any such adverts will send you to the advertiser's website through a referral program which may use cookies and will track the number of referrals sent from this website. This may include the use of cookies which may in turn be saved on your computer's hard drive. Users should therefore note they click on sponsored external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.

## Social Media Platforms ##

Communication, engagement and actions taken through external social media platforms that this website and its owners participate on are subject to the terms and conditions as well as the privacy policies held with each social media platform respectively.

Users are advised to use social media platforms wisely and communicate / engage upon them with due care and caution in regard to their own privacy and personal details. Neither this website nor its owners will ever ask for personal or sensitive information through social media platforms; users wishing to discuss sensitive details are encouraged to contact them through primary communication channels such as email.

This website may use social sharing buttons which help share web content directly from web pages to the social media platform in question. Users are advised before using such social sharing buttons that they do so at their own discretion and note that the social media platform may track and save your request to share a web page through your social media platform account.

## Resources & Further Information ##

* [Data Protection Act 1998](http://www.legislation.gov.uk/ukpga/1998/29/contents)
* [Privacy and Electronic Communications Regulations 2003](http://www.legislation.gov.uk/uksi/2003/2426/contents/made)
* [Privacy and Electronic Communications Regulations 2003 - The Guide](https://ico.org.uk/for-organisations/guide-to-pecr/)
* [Twitter Privacy Policy](http://twitter.com/privacy)
* [Facebook Privacy Policy](http://www.facebook.com/about/privacy/)
* [Google Privacy Policy](http://www.google.com/privacy.html)
* [Sample Website Privacy Policy](http://www.jamieking.co.uk/resources/free_sample_privacy_policy.html)
@@ -125,6 +125,14 @@ rclone supports multipart uploads with S3 which means that it can
upload files bigger than 5GB. Note that files uploaded with multipart
upload don't have an MD5SUM.

### Buckets and Regions ###

With Amazon S3 you can list buckets (`rclone lsd`) using any region,
but you can only access the content of a bucket from the region it was
created in. If you attempt to access a bucket from the wrong region,
you will get an error, `incorrect region, the bucket is not in 'XXX'
region`.

### Ceph ###

Ceph is an object storage system which presents an Amazon S3 interface.

@@ -22,16 +22,20 @@
        <li><a href="/faq/"><i class="fa fa-book"></i> FAQ</a></li>
        <li><a href="/licence/"><i class="fa fa-book"></i> Licence</a></li>
        <li><a href="/authors/"><i class="fa fa-book"></i> Authors</a></li>
        <li><a href="/donate/"><i class="fa fa-book"></i> Donate</a></li>
        <li><a href="/privacy/"><i class="fa fa-book"></i> Privacy Policy</a></li>
      </ul>
    </li>
    <li class="dropdown">
      <a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Storage Systems</a>
      <ul class="dropdown-menu">
        <li><a href="/overview/"><i class="fa fa-archive"></i> Overview</a></li>
        <li><a href="/drive/"><i class="fa fa-google"></i> Drive</a></li>
        <li><a href="/s3/"><i class="fa fa-archive"></i> S3</a></li>
        <li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Swift</a></li>
        <li><a href="/dropbox/"><i class="fa fa-dropbox"></i> Dropbox</a></li>
        <li><a href="/googlecloudstorage/"><i class="fa fa-google"></i> Google Cloud Storage</a></li>
        <li><a href="/amazonclouddrive/"><i class="fa fa-archive"></i> Amazon Cloud Drive</a></li>
        <li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
      </ul>
    </li>

21
docs/static/css/custom.css
vendored
@@ -4,4 +4,23 @@ body {

footer {
    margin: 50px 0;
}
}

table {
    background-color:#e0e0ff
}

tbody td, th {
    border: 1px solid black;
    padding: 3px 7px 2px 7px;
}

thead td, th {
    border: 1px solid black;
    padding: 3px 7px 2px 7px;
    font-weight: bold;
}

tbody tr:nth-child(odd) {
    background-color:#d0d0ff
}

630
drive/drive.go
@@ -10,23 +10,27 @@ package drive
import (
    "fmt"
    "io"
    "log"
    "net/http"
    "strings"
    "sync"
    "time"

    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
    "google.golang.org/api/drive/v2"
    "google.golang.org/api/googleapi"

    "github.com/ncw/rclone/dircache"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/googleauth"
    "github.com/ogier/pflag"
    "github.com/ncw/rclone/oauthutil"
    "github.com/ncw/rclone/pacer"
    "github.com/spf13/pflag"
)

// Constants
const (
    rcloneClientId     = "202264815644.apps.googleusercontent.com"
    rcloneClientSecret = "X4Z3ca8xfWDb1Voo-F9a7ZxJ"
    rcloneClientID     = "202264815644.apps.googleusercontent.com"
    rcloneClientSecret = "8p/yms3OlNXE9OTDl/HLypf9gdiJ5cT3"
    driveFolderType    = "application/vnd.google-apps.folder"
    timeFormatIn       = time.RFC3339
    timeFormatOut      = "2006-01-02T15:04:05.000000000Z07:00"
@@ -45,10 +49,12 @@ var (
    chunkSize         = fs.SizeSuffix(256 * 1024)
    driveUploadCutoff = chunkSize
    // Description of how to auth for this app
    driveAuth = &googleauth.Auth{
        Scope:               "https://www.googleapis.com/auth/drive",
        DefaultClientId:     rcloneClientId,
        DefaultClientSecret: rcloneClientSecret,
    driveConfig = &oauth2.Config{
        Scopes:       []string{"https://www.googleapis.com/auth/drive"},
        Endpoint:     google.Endpoint,
        ClientID:     rcloneClientID,
        ClientSecret: fs.Reveal(rcloneClientSecret),
        RedirectURL:  oauthutil.TitleBarRedirectURL,
    }
)

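The hunk above swaps rclone's homegrown `googleauth` wrapper for the standard `golang.org/x/oauth2` package. As a reminder of what an `oauth2.Config` like `driveConfig` buys you, here is a minimal sketch of the standard three-legged flow; the client ID, secret and out-of-band redirect URL below are placeholders, not rclone's real values:

```
package main

import (
    "fmt"

    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
)

func main() {
    // Same shape as driveConfig above, with placeholder credentials.
    conf := &oauth2.Config{
        Scopes:       []string{"https://www.googleapis.com/auth/drive"},
        Endpoint:     google.Endpoint,
        ClientID:     "YOUR_CLIENT_ID",
        ClientSecret: "YOUR_CLIENT_SECRET",
        RedirectURL:  "urn:ietf:wg:oauth:2.0:oob", // copy-paste ("title bar") flow
    }

    // 1. Send the user to the consent page.
    fmt.Println("Visit:", conf.AuthCodeURL("state"))

    // 2. Exchange the pasted verification code for a token.
    var code string
    fmt.Scanln(&code)
    token, err := conf.Exchange(oauth2.NoContext, code)
    if err != nil {
        panic(err)
    }

    // 3. Get an *http.Client which refreshes the token automatically;
    // this is what gets handed to drive.New(...).
    client := conf.Client(oauth2.NoContext, token)
    _ = client
}
```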
@@ -58,7 +64,10 @@ func init() {
        Name:  "drive",
        NewFs: NewFs,
        Config: func(name string) {
            driveAuth.Config(name)
            err := oauthutil.Config(name, driveConfig)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
        },
        Options: []fs.Option{{
            Name: "client_id",
@@ -74,17 +83,13 @@ func init() {

// FsDrive represents a remote drive server
type FsDrive struct {
    svc          *drive.Service // the connection to the drive server
    root         string         // the path we are working on
    client       *http.Client   // authorized client
    about        *drive.About   // information about the drive, including the root
    rootId       string         // Id of the root directory
    foundRoot    bool           // Whether we have found the root or not
    findRootLock sync.Mutex     // Protect findRoot from concurrent use
    dirCache     *dirCache      // Map of directory path to directory id
    findDirLock  sync.Mutex     // Protect findDir from concurrent use
    pacer        chan struct{}  // To pace the operations
    sleepTime    time.Duration  // Time to sleep for each transaction
    name     string             // name of this remote
    svc      *drive.Service     // the connection to the drive server
    root     string             // the path we are working on
    client   *http.Client       // authorized client
    about    *drive.About       // information about the drive, including the root
    dirCache *dircache.DirCache // Map of directory path to directory id
    pacer    *pacer.Pacer       // To pace the API calls
}

// FsObjectDrive describes a drive object
@@ -98,99 +103,28 @@ type FsObjectDrive struct {
    modifiedDate string // RFC3339 time it was last modified
}

// dirCache caches paths to directory Ids and vice versa
type dirCache struct {
    sync.RWMutex
    cache    map[string]string
    invCache map[string]string
}

// Make a new locked map
func newDirCache() *dirCache {
    d := &dirCache{}
    d.Flush()
    return d
}

// Gets an Id given a path
func (m *dirCache) Get(path string) (id string, ok bool) {
    m.RLock()
    id, ok = m.cache[path]
    m.RUnlock()
    return
}

// GetInv gets a path given an Id
func (m *dirCache) GetInv(path string) (id string, ok bool) {
    m.RLock()
    id, ok = m.invCache[path]
    m.RUnlock()
    return
}

// Put a path, id into the map
func (m *dirCache) Put(path, id string) {
    m.Lock()
    m.cache[path] = id
    m.invCache[id] = path
    m.Unlock()
}

// Flush the map of all data
func (m *dirCache) Flush() {
    m.Lock()
    m.cache = make(map[string]string)
    m.invCache = make(map[string]string)
    m.Unlock()
}

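All of the cache code above is deleted in this change; the drive backend now delegates to the shared `github.com/ncw/rclone/dircache` package. The sketch below shows roughly how a backend wires it up, with the method set (`New`, `FindRoot`, `FindPath`, `RootID`, `ResetRoot`) inferred from the calls visible in this diff; the exact interface dircache expects of the Fs is an assumption here:

```
// Sketch only - dircache API inferred from the calls in this diff.
package main

import (
    "fmt"

    "github.com/ncw/rclone/dircache"
)

// exampleFs pretends to be a remote with a single directory "docs".
type exampleFs struct {
    dirCache *dircache.DirCache
}

// FindLeaf reports whether leaf exists as a directory inside pathID.
func (f *exampleFs) FindLeaf(pathID, leaf string) (string, bool, error) {
    if leaf == "docs" {
        return "id-docs", true, nil
    }
    return "", false, nil
}

// CreateDir would create leaf inside pathID on the remote.
func (f *exampleFs) CreateDir(pathID, leaf string) (string, error) {
    return "id-" + leaf, nil
}

func main() {
    f := &exampleFs{}
    f.dirCache = dircache.New("", "rootID", f)
    if err := f.dirCache.FindRoot(false); err == nil {
        // FindPath resolves (and with create=true, makes) the parent
        // directories of a remote path, returning leaf + parent ID.
        leaf, dirID, _ := f.dirCache.FindPath("docs/file.txt", true)
        fmt.Println(leaf, dirID) // "file.txt" "id-docs"
    }
}
```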
// ------------------------------------------------------------

// The name of the remote (as passed into NewFs)
func (f *FsDrive) Name() string {
    return f.name
}

// The root of the remote (as passed into NewFs)
func (f *FsDrive) Root() string {
    return f.root
}

// String converts this FsDrive to a string
func (f *FsDrive) String() string {
    return fmt.Sprintf("Google drive root '%s'", f.root)
}

// Start a call to the drive API
//
// This must be called as a pair with endCall
//
// This waits for the pacer token
func (f *FsDrive) beginCall() {
    // pacer starts with a token in and whenever we take one out
    // XXX ms later we put another in. We could do this with a
    // Ticker more accurately, but then we'd have to work out how
    // not to run it when it wasn't needed
    <-f.pacer

    // Restart the timer
    go func(t time.Duration) {
        // fs.Debug(f, "New sleep for %v at %v", t, time.Now())
        time.Sleep(t)
        f.pacer <- struct{}{}
    }(f.sleepTime)
}

// End a call to the drive API
//
// Refresh the pace given an error that was returned. It returns a
// boolean as to whether the operation should be retried.
//
// See https://developers.google.com/drive/web/handle-errors
// http://stackoverflow.com/questions/18529524/403-rate-limit-after-only-1-insert-per-second
func (f *FsDrive) endCall(err error) bool {
    again := false
    oldSleepTime := f.sleepTime
    if err == nil {
        f.sleepTime = (f.sleepTime<<decayConstant - f.sleepTime) >> decayConstant
        if f.sleepTime < minSleep {
            f.sleepTime = minSleep
        }
        if f.sleepTime != oldSleepTime {
            fs.Debug(f, "Reducing sleep to %v", f.sleepTime)
        }
    } else {
        fs.Debug(f, "Error received: %T %#v", err, err)
// shouldRetry determines whether a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
    again = false
    errOut = err
    if err != nil {
        // Check for net error Timeout()
        if x, ok := err.(interface {
            Timeout() bool
@@ -216,30 +150,7 @@ func (f *FsDrive) endCall(err error) bool {
            }
        }
    }
    if again {
        f.sleepTime *= 2
        if f.sleepTime > maxSleep {
            f.sleepTime = maxSleep
        }
        if f.sleepTime != oldSleepTime {
            fs.Debug(f, "Rate limited, increasing sleep to %v", f.sleepTime)
        }
    }
    return again
}

// Pace the remote operations to not exceed Google's limits and retry
// on 403 rate limit exceeded
//
// This calls fn, expecting it to place its error in perr
func (f *FsDrive) call(perr *error, fn func()) {
    for {
        f.beginCall()
        fn()
        if !f.endCall(*perr) {
            break
        }
    }
    return again, err
}

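The beginCall/endCall/call machinery above - a token channel plus an exponentially decaying sleepTime, where the shift expression computes sleepTime * (1 - 2^-decayConstant) - is exactly what the extracted pacer package replaces. A minimal sketch of the new calling convention, using only the pacer methods that appear in this diff (the stub functions are stand-ins):

```
// Sketch of the pacer calling convention used throughout this diff.
package main

import (
    "errors"
    "time"

    "github.com/ncw/rclone/pacer"
)

// doOneAPICall stands in for a single Drive API call.
func doOneAPICall() error { return errors.New("403: rate limit exceeded") }

// shouldRetrySketch is a stand-in for the backend's shouldRetry.
func shouldRetrySketch(err error) (bool, error) { return err != nil, err }

func main() {
    p := pacer.New().
        SetMinSleep(10 * time.Millisecond).
        SetMaxSleep(2 * time.Second).
        SetDecayConstant(2)

    // Call retries the closure while it returns (true, err); each
    // attempt is paced so we don't exceed the remote's rate limits.
    _ = p.Call(func() (bool, error) {
        return shouldRetrySketch(doOneAPICall())
    })

    // CallNoRetry paces but does not retry - used for uploads where
    // the body reader can't be rewound, returning a retry error instead.
    _ = p.CallNoRetry(func() (bool, error) {
        return shouldRetrySketch(doOneAPICall())
    })
}
```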
// parseDrivePath parses a drive 'url'
@@ -280,8 +191,9 @@ func (f *FsDrive) listAll(dirId string, title string, directoriesOnly bool, file
OUTER:
    for {
        var files *drive.FileList
        f.call(&err, func() {
        err = f.pacer.Call(func() (bool, error) {
            files, err = list.Do()
            return shouldRetry(err)
        })
        if err != nil {
            return false, fmt.Errorf("Couldn't list directory: %s", err)
@@ -321,9 +233,9 @@ func NewFs(name, path string) (fs.Fs, error) {
        return nil, fmt.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
    }

    t, err := driveAuth.NewTransport(name)
    oAuthClient, err := oauthutil.NewClient(name, driveConfig)
    if err != nil {
        return nil, err
        log.Fatalf("Failed to configure drive: %v", err)
    }

    root, err := parseDrivePath(path)
@@ -332,41 +244,39 @@ func NewFs(name, path string) (fs.Fs, error) {
    }

    f := &FsDrive{
        root:      root,
        dirCache:  newDirCache(),
        pacer:     make(chan struct{}, 1),
        sleepTime: minSleep,
        name:  name,
        root:  root,
        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
    }

    // Put the first pacing token in
    f.pacer <- struct{}{}

    // Create a new authorized Drive client.
    f.client = t.Client()
    f.client = oAuthClient
    f.svc, err = drive.New(f.client)
    if err != nil {
        return nil, fmt.Errorf("Couldn't create Drive client: %s", err)
    }

    // Read About so we know the root path
    f.call(&err, func() {
    err = f.pacer.Call(func() (bool, error) {
        f.about, err = f.svc.About.Get().Do()
        return shouldRetry(err)
    })
    if err != nil {
        return nil, fmt.Errorf("Couldn't read info about Drive: %s", err)
    }

    // Find the Id of the true root and clear everything
    f.resetRoot()
    f.dirCache = dircache.New(root, f.about.RootFolderId, f)

    // Find the current root
    err = f.findRoot(false)
    err = f.dirCache.FindRoot(false)
    if err != nil {
        // Assume it is a file
        newRoot, remote := splitPath(root)
        newRoot, remote := dircache.SplitPath(root)
        newF := *f
        newF.dirCache = dircache.New(newRoot, f.about.RootFolderId, &newF)
        newF.root = newRoot
        // Make new Fs which is the parent
        err = newF.findRoot(false)
        err = newF.dirCache.FindRoot(false)
        if err != nil {
            // No root so return old f
            return f, nil
@@ -379,7 +289,7 @@ func NewFs(name, path string) (fs.Fs, error) {
        // return a Fs Limited to this object
        return fs.NewLimited(&newF, obj), nil
    }
    // fmt.Printf("Root id %s", f.rootId)
    // fmt.Printf("Root id %s", f.dirCache.RootID())
    return f, nil
}

@@ -417,6 +327,40 @@ func (f *FsDrive) NewFsObject(remote string) fs.Object {
    return f.newFsObjectWithInfo(remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathId
func (f *FsDrive) FindLeaf(pathId, leaf string) (pathIdOut string, found bool, err error) {
    // Find the leaf in pathId
    found, err = f.listAll(pathId, leaf, true, false, func(item *drive.File) bool {
        if item.Title == leaf {
            pathIdOut = item.Id
            return true
        }
        return false
    })
    return pathIdOut, found, err
}

// CreateDir makes a directory with pathId as parent and name leaf
func (f *FsDrive) CreateDir(pathId, leaf string) (newId string, err error) {
    // fmt.Println("Making", path)
    // Define the metadata for the directory we are going to create.
    createInfo := &drive.File{
        Title:       leaf,
        Description: leaf,
        MimeType:    driveFolderType,
        Parents:     []*drive.ParentReference{{Id: pathId}},
    }
    var info *drive.File
    err = f.pacer.Call(func() (bool, error) {
        info, err = f.svc.Files.Insert(createInfo).Do()
        return shouldRetry(err)
    })
    if err != nil {
        return "", err
    }
    return info.Id, nil
}

// Path should be directory path either "" or "path/"
//
// List the directory using a recursive list from the root
@@ -522,172 +466,20 @@ func (f *FsDrive) listDirFull(dirId string, path string, out fs.ObjectsChan) err
    return nil
}

// Splits a path into directory, leaf
//
// Path shouldn't start or end with a /
//
// If there are no slashes then directory will be "" and leaf = path
func splitPath(path string) (directory, leaf string) {
    lastSlash := strings.LastIndex(path, "/")
    if lastSlash >= 0 {
        directory = path[:lastSlash]
        leaf = path[lastSlash+1:]
    } else {
        directory = ""
        leaf = path
    }
    return
}

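A quick illustration of the splitPath contract above (dircache.SplitPath, which replaces it, presumably keeps the same behaviour):

```
func exampleSplitPath() {
    // Illustrative only - exercising splitPath as defined above.
    directory, leaf := splitPath("a/b/c.txt")
    fmt.Println(directory, leaf) // "a/b" "c.txt"
    directory, leaf = splitPath("c.txt")
    fmt.Println(directory, leaf) // "" "c.txt"
}
```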
// Finds the directory passed in returning the directory Id starting from pathId
//
// Path shouldn't start or end with a /
//
// If create is set it will make the directory if not found
//
// Algorithm:
// Look in the cache for the path, if found return the pathId
// If not found strip the last path off the path and recurse
// Now have a parent directory id, so look in the parent for self and return it
func (f *FsDrive) findDir(path string, create bool) (pathId string, err error) {
    pathId = f._findDirInCache(path)
    if pathId != "" {
        return
    }
    f.findDirLock.Lock()
    defer f.findDirLock.Unlock()
    return f._findDir(path, create)
}

// Look for the root and in the cache - safe to call without the findDirLock
func (f *FsDrive) _findDirInCache(path string) string {
    // fmt.Println("Finding",path,"create",create,"cache",cache)
    // If it is the root, then return it
    if path == "" {
        // fmt.Println("Root")
        return f.rootId
    }

    // If it is in the cache then return it
    pathId, ok := f.dirCache.Get(path)
    if ok {
        // fmt.Println("Cache hit on", path)
        return pathId
    }

    return ""
}

// Unlocked findDir - must have findDirLock
func (f *FsDrive) _findDir(path string, create bool) (pathId string, err error) {
    pathId = f._findDirInCache(path)
    if pathId != "" {
        return
    }

    // Split the path into directory, leaf
    directory, leaf := splitPath(path)

    // Recurse and find pathId for directory
    pathId, err = f._findDir(directory, create)
    if err != nil {
        return pathId, err
    }

    // Find the leaf in pathId
    found, err := f.listAll(pathId, leaf, true, false, func(item *drive.File) bool {
        if item.Title == leaf {
            pathId = item.Id
            return true
        }
        return false
    })
    if err != nil {
        return pathId, err
    }

    // If not found create the directory if required or return an error
    if !found {
        if create {
            // fmt.Println("Making", path)
            // Define the metadata for the directory we are going to create.
            createInfo := &drive.File{
                Title:       leaf,
                Description: leaf,
                MimeType:    driveFolderType,
                Parents:     []*drive.ParentReference{{Id: pathId}},
            }
            var info *drive.File
            f.call(&err, func() {
                info, err = f.svc.Files.Insert(createInfo).Do()
            })
            if err != nil {
                return pathId, fmt.Errorf("Failed to make directory: %v", err)
            }
            pathId = info.Id
        } else {
            return pathId, fmt.Errorf("Couldn't find directory: %q", path)
        }
    }

    // Store the directory in the cache
    f.dirCache.Put(path, pathId)

    // fmt.Println("Dir", path, "is", pathId)
    return pathId, nil
}

// Finds the root directory if not already found
//
// Resets the root directory
//
// If create is set it will make the directory if not found
func (f *FsDrive) findRoot(create bool) error {
    f.findRootLock.Lock()
    defer f.findRootLock.Unlock()
    if f.foundRoot {
        return nil
    }
    rootId, err := f.findDir(f.root, create)
    if err != nil {
        return err
    }
    f.rootId = rootId
    f.dirCache.Flush()
    // Put the root directory in
    f.dirCache.Put("", f.rootId)
    f.foundRoot = true
    return nil
}

// Resets the root directory to the absolute root and clears the dirCache
func (f *FsDrive) resetRoot() {
    f.findRootLock.Lock()
    defer f.findRootLock.Unlock()
    f.foundRoot = false
    f.dirCache.Flush()

    // Put the true root in
    f.rootId = f.about.RootFolderId

    // Put the root directory in
    f.dirCache.Put("", f.rootId)
}

// Walk the path returning a channel of FsObjects
func (f *FsDrive) List() fs.ObjectsChan {
    out := make(fs.ObjectsChan, fs.Config.Checkers)
    go func() {
        defer close(out)
        err := f.findRoot(false)
        err := f.dirCache.FindRoot(false)
        if err != nil {
            fs.Stats.Error()
            fs.ErrorLog(f, "Couldn't find root: %s", err)
        } else {
            if f.root == "" && *driveFullList {
                err = f.listDirFull(f.rootId, "", out)
                err = f.listDirFull(f.dirCache.RootID(), "", out)
            } else {
                err = f.listDirRecursive(f.rootId, "", out)
                err = f.listDirRecursive(f.dirCache.RootID(), "", out)
            }
            if err != nil {
                fs.Stats.Error()
@@ -703,12 +495,12 @@ func (f *FsDrive) ListDir() fs.DirChan {
    out := make(fs.DirChan, fs.Config.Checkers)
    go func() {
        defer close(out)
        err := f.findRoot(false)
        err := f.dirCache.FindRoot(false)
        if err != nil {
            fs.Stats.Error()
            fs.ErrorLog(f, "Couldn't find root: %s", err)
        } else {
            _, err := f.listAll(f.rootId, "", true, false, func(item *drive.File) bool {
            _, err := f.listAll(f.dirCache.RootID(), "", true, false, func(item *drive.File) bool {
                dir := &fs.Dir{
                    Name:  item.Title,
                    Bytes: -1,
@@ -727,23 +519,21 @@ func (f *FsDrive) ListDir() fs.DirChan {
    return out
}

// Put the object
// Creates a drive.File info from the parameters passed in and a half
// finished FsObjectDrive which must have setMetaData called on it
//
// This assumes that the object doesn't already exist - if you
// call it when it does exist then it will create a duplicate. Call
// object.Update() in this case.
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Used to create new objects
func (f *FsDrive) createFileInfo(remote string, modTime time.Time, size int64) (*FsObjectDrive, *drive.File, error) {
    // Temporary FsObject under construction
    o := &FsObjectDrive{drive: f, remote: remote}
    o := &FsObjectDrive{
        drive:  f,
        remote: remote,
        bytes:  size,
    }

    directory, leaf := splitPath(o.remote)
    directoryId, err := f.findDir(directory, true)
    leaf, directoryId, err := f.dirCache.FindPath(remote, true)
    if err != nil {
        return o, fmt.Errorf("Couldn't find or make directory: %s", err)
        return nil, nil, err
    }

    // Define the metadata for the file we are going to create.
@@ -754,18 +544,34 @@ func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64
        MimeType:     fs.MimeType(o),
        ModifiedDate: modTime.Format(timeFormatOut),
    }
    return o, createInfo, nil
}

// Put the object
//
// This assumes that the object doesn't already exist - if you
// call it when it does exist then it will create a duplicate. Call
// object.Update() in this case.
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
    o, createInfo, err := f.createFileInfo(remote, modTime, size)
    if err != nil {
        return nil, err
    }

    var info *drive.File
    if size == 0 || size < int64(driveUploadCutoff) {
        // Make the API request to upload metadata and file data.
        // Don't retry, return a retry error instead
        f.beginCall()
        info, err = f.svc.Files.Insert(createInfo).Media(in).Do()
        if f.endCall(err) {
            return o, fs.RetryErrorf("Upload failed - retry: %s", err)
        }
        err = f.pacer.CallNoRetry(func() (bool, error) {
            info, err = f.svc.Files.Insert(createInfo).Media(in).Do()
            return shouldRetry(err)
        })
        if err != nil {
            return o, fmt.Errorf("Upload failed: %s", err)
            return o, err
        }
    } else {
        // Upload the file in chunks
@@ -780,20 +586,21 @@ func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64

// Mkdir creates the container if it doesn't exist
func (f *FsDrive) Mkdir() error {
    return f.findRoot(true)
    return f.dirCache.FindRoot(true)
}

// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *FsDrive) Rmdir() error {
    err := f.findRoot(false)
    err := f.dirCache.FindRoot(false)
    if err != nil {
        return err
    }
    var children *drive.ChildList
    f.call(&err, func() {
        children, err = f.svc.Children.List(f.rootId).MaxResults(10).Do()
    err = f.pacer.Call(func() (bool, error) {
        children, err = f.svc.Children.List(f.dirCache.RootID()).MaxResults(10).Do()
        return shouldRetry(err)
    })
    if err != nil {
        return err
@@ -803,18 +610,19 @@ func (f *FsDrive) Rmdir() error {
    }
    // Delete the directory if it isn't the root
    if f.root != "" {
        f.call(&err, func() {
        err = f.pacer.Call(func() (bool, error) {
            if *driveUseTrash {
                _, err = f.svc.Files.Trash(f.rootId).Do()
                _, err = f.svc.Files.Trash(f.dirCache.RootID()).Do()
            } else {
                err = f.svc.Files.Delete(f.rootId).Do()
                err = f.svc.Files.Delete(f.dirCache.RootID()).Do()
            }
            return shouldRetry(err)
        })
        if err != nil {
            return err
        }
    }
    f.resetRoot()
    f.dirCache.ResetRoot()
    return nil
}

@@ -823,6 +631,40 @@ func (fs *FsDrive) Precision() time.Duration {
    return time.Millisecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsDrive) Copy(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*FsObjectDrive)
    if !ok {
        fs.Debug(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }

    o, createInfo, err := f.createFileInfo(remote, srcObj.ModTime(), srcObj.bytes)
    if err != nil {
        return nil, err
    }

    var info *drive.File
    err = o.drive.pacer.Call(func() (bool, error) {
        info, err = o.drive.svc.Files.Copy(srcObj.id, createInfo).Do()
        return shouldRetry(err)
    })
    if err != nil {
        return nil, err
    }

    o.setMetaData(info)
    return o, nil
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
@@ -832,24 +674,97 @@ func (f *FsDrive) Purge() error {
    if f.root == "" {
        return fmt.Errorf("Can't purge root directory")
    }
    err := f.findRoot(false)
    err := f.dirCache.FindRoot(false)
    if err != nil {
        return err
    }
    f.call(&err, func() {
    err = f.pacer.Call(func() (bool, error) {
        if *driveUseTrash {
            _, err = f.svc.Files.Trash(f.rootId).Do()
            _, err = f.svc.Files.Trash(f.dirCache.RootID()).Do()
        } else {
            err = f.svc.Files.Delete(f.rootId).Do()
            err = f.svc.Files.Delete(f.dirCache.RootID()).Do()
        }
        return shouldRetry(err)
    })
    f.resetRoot()
    f.dirCache.ResetRoot()
    if err != nil {
        return err
    }
    return nil
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (dstFs *FsDrive) Move(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*FsObjectDrive)
    if !ok {
        fs.Debug(src, "Can't move - not same remote type")
        return nil, fs.ErrorCantMove
    }

    // Temporary FsObject under construction
    dstObj, dstInfo, err := dstFs.createFileInfo(remote, srcObj.ModTime(), srcObj.bytes)
    if err != nil {
        return nil, err
    }

    // Do the move
    info, err := dstFs.svc.Files.Patch(srcObj.id, dstInfo).SetModifiedDate(true).Do()
    if err != nil {
        return nil, err
    }

    dstObj.setMetaData(info)
    return dstObj, nil
}

// Move src to this remote using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (dstFs *FsDrive) DirMove(src fs.Fs) error {
    srcFs, ok := src.(*FsDrive)
    if !ok {
        fs.Debug(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
    }

    // Check if destination exists
    dstFs.dirCache.ResetRoot()
    err := dstFs.dirCache.FindRoot(false)
    if err == nil {
        return fs.ErrorDirExists
    }

    // Find ID of parent
    leaf, directoryId, err := dstFs.dirCache.FindPath(dstFs.root, true)
    if err != nil {
        return err
    }

    // Do the move
    patch := drive.File{
        Title:   leaf,
        Parents: []*drive.ParentReference{{Id: directoryId}},
    }
    _, err = dstFs.svc.Files.Patch(srcFs.dirCache.RootID(), &patch).Do()
    if err != nil {
        return err
    }
    srcFs.dirCache.ResetRoot()
    return nil
}

// ------------------------------------------------------------

// Return the parent Fs
@@ -895,11 +810,9 @@ func (o *FsObjectDrive) readMetaData() (err error) {
        return nil
    }

    directory, leaf := splitPath(o.remote)
    directoryId, err := o.drive.findDir(directory, false)
    leaf, directoryId, err := o.drive.dirCache.FindPath(o.remote, false)
    if err != nil {
        fs.Debug(o, "Couldn't find directory: %s", err)
        return fmt.Errorf("Couldn't find directory: %s", err)
        return err
    }

    found, err := o.drive.listAll(directoryId, leaf, false, true, func(item *drive.File) bool {
@@ -938,7 +851,7 @@ func (o *FsObjectDrive) ModTime() time.Time {
    return modTime
}

// Sets the modification time of the local fs object
// Sets the modification time of the drive fs object
func (o *FsObjectDrive) SetModTime(modTime time.Time) {
    err := o.readMetaData()
    if err != nil {
@@ -952,8 +865,9 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) {
    }
    // Set modified date
    var info *drive.File
    o.drive.call(&err, func() {
    err = o.drive.pacer.Call(func() (bool, error) {
        info, err = o.drive.svc.Files.Update(o.id, updateInfo).SetModifiedDate(true).Do()
        return shouldRetry(err)
    })
    if err != nil {
        fs.Stats.Error()
@@ -980,8 +894,9 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
    }
    req.Header.Set("User-Agent", fs.UserAgent)
    var res *http.Response
    o.drive.call(&err, func() {
    err = o.drive.pacer.Call(func() (bool, error) {
        res, err = o.drive.client.Do(req)
        return shouldRetry(err)
    })
    if err != nil {
        return nil, err
@@ -1009,13 +924,12 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro
    var info *drive.File
    if size == 0 || size < int64(driveUploadCutoff) {
        // Don't retry, return a retry error instead
        o.drive.beginCall()
        info, err = o.drive.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in).Do()
        if o.drive.endCall(err) {
            return fs.RetryErrorf("Update failed - retry: %s", err)
        }
        err = o.drive.pacer.CallNoRetry(func() (bool, error) {
            info, err = o.drive.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in).Do()
            return shouldRetry(err)
        })
        if err != nil {
            return fmt.Errorf("Update failed: %s", err)
            return err
        }
    } else {
        // Upload the file in chunks
@@ -1031,17 +945,23 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro
// Remove an object
func (o *FsObjectDrive) Remove() error {
    var err error
    o.drive.call(&err, func() {
    err = o.drive.pacer.Call(func() (bool, error) {
        if *driveUseTrash {
            _, err = o.drive.svc.Files.Trash(o.id).Do()
        } else {
            err = o.drive.svc.Files.Delete(o.id).Do()
        }
        return shouldRetry(err)
    })
    return err
}

// Check the interfaces are satisfied
var _ fs.Fs = &FsDrive{}
var _ fs.Purger = &FsDrive{}
var _ fs.Object = &FsObjectDrive{}
var (
    _ fs.Fs       = (*FsDrive)(nil)
    _ fs.Purger   = (*FsDrive)(nil)
    _ fs.Copier   = (*FsDrive)(nil)
    _ fs.Mover    = (*FsDrive)(nil)
    _ fs.DirMover = (*FsDrive)(nil)
    _ fs.Object   = (*FsObjectDrive)(nil)
)

@@ -1,7 +1,7 @@
// Test Drive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
// Regenerate with: make gen_tests
package drive_test

import (
@@ -34,6 +34,9 @@ func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }

@@ -78,12 +78,13 @@ func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *dri
    req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
    req.Header.Set("User-Agent", fs.UserAgent)
    var res *http.Response
    f.call(&err, func() {
    err = f.pacer.Call(func() (bool, error) {
        res, err = f.client.Do(req)
        if err == nil {
            defer googleapi.CloseBody(res)
            err = googleapi.CheckResponse(res)
        }
        return shouldRetry(err)
    })
    if err != nil {
        return nil, err
@@ -203,19 +204,19 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
        }

        // Transfer the chunk
        for try := 1; try <= maxTries; try++ {
            fs.Debug(rx.remote, "Sending chunk %d length %d, %d/%d", start, reqSize, try, maxTries)
            rx.f.beginCall()
        err = rx.f.pacer.Call(func() (bool, error) {
            fs.Debug(rx.remote, "Sending chunk %d length %d", start, reqSize)
            StatusCode, err = rx.transferChunk(start, buf)
            rx.f.endCall(err)
            again, err := shouldRetry(err)
            if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
                goto success
                again = false
                err = nil
            }
            fs.Debug(rx.remote, "Retrying chunk %d/%d, code=%d, err=%v", try, maxTries, StatusCode, err)
            return again, err
        })
        if err != nil {
            return nil, err
        }
        fs.Debug(rx.remote, "Failed to send chunk")
        return nil, fs.RetryErrorf("Chunk upload failed - retry: code=%d, err=%v", StatusCode, err)
success:

        start += reqSize
    }

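The hunk above converts the resumable-upload chunk loop to the pacer as well: each chunk is sent inside pacer.Call, and a 308 "Resume Incomplete" or 2xx status counts as success for that chunk. A compact sketch of that control flow - transferChunk, the buffer handling and the retry predicate are stand-ins, since the full function isn't shown in this hunk:

```
// Sketch of the per-chunk retry loop, assuming transferChunk sends
// bytes [start, start+len(buf)) with a Content-Range header and
// returns the HTTP status code.
package main

import (
    "net/http"

    "github.com/ncw/rclone/pacer"
)

const statusResumeIncomplete = 308 // Google: "send the next chunk"

// shouldRetrySketch stands in for the backend's shouldRetry.
func shouldRetrySketch(err error) (bool, error) { return err != nil, err }

func sendChunks(p *pacer.Pacer, chunks [][]byte,
    transferChunk func(start int64, buf []byte) (int, error)) error {
    var start int64
    for _, buf := range chunks {
        chunk := buf
        err := p.Call(func() (bool, error) {
            code, err := transferChunk(start, chunk)
            again, err := shouldRetrySketch(err)
            // 308 Resume Incomplete or 2xx means this chunk landed,
            // so don't retry it whatever the transport error said.
            if code == statusResumeIncomplete || code == http.StatusCreated || code == http.StatusOK {
                again, err = false, nil
            }
            return again, err
        })
        if err != nil {
            return err
        }
        start += int64(len(chunk))
    }
    return nil
}

func main() {
    fake := func(start int64, buf []byte) (int, error) { return statusResumeIncomplete, nil }
    _ = sendChunks(pacer.New(), [][]byte{make([]byte, 4), make([]byte, 4)}, fake)
}
```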
@@ -5,27 +5,6 @@ package dropbox
Limitations of dropbox

File system is case insensitive

FIXME Getting this sometimes
Failed to copy: Upload failed: invalid character '<' looking for beginning of value
This is a JSON decode error - from Update / UploadByChunk
- Caused by 500 error from dropbox
- See https://github.com/stacktic/dropbox/issues/1
- Possibly confusing dropbox with excess concurrency?

FIXME implement timeouts - need to get "github.com/stacktic/dropbox"
and hence "golang.org/x/oauth2" which uses DefaultTransport unless it
is set in the context passed into .Client()

func (db *Dropbox) client() *http.Client {
    return db.config.Client(oauth2.NoContext, db.token)
}

// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
var HTTPClient ContextKey

So pass in a context with HTTPClient set...
*/

import (
@@ -36,21 +15,33 @@ import (
    "io/ioutil"
    "log"
    "path"
    "regexp"
    "strings"
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/oauthutil"
    "github.com/spf13/pflag"
    "github.com/stacktic/dropbox"
)

// Constants
const (
    rcloneAppKey    = "5jcck7diasz0rqy"
    rcloneAppSecret = "1n9m04y2zx7bf26"
    uploadChunkSize = 64 * 1024 // chunk size for upload
    rcloneAppSecret = "m8WRxJ6b1Z/Y25fDwJWS"
    metadataLimit   = dropbox.MetadataLimitDefault // max items to fetch at once
)

var (
    // A regexp matching path names for files Dropbox ignores
    // See https://www.dropbox.com/en/help/145 - Ignored files
    ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
    // Upload chunk size - setting too small makes uploads slow.
    // Chunks aren't buffered into memory though so can set large.
    uploadChunkSize    = fs.SizeSuffix(128 * 1024 * 1024)
    maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
)

// Register with Fs
func init() {
    fs.Register(&fs.FsInfo{
@@ -65,6 +56,7 @@ func init() {
        Help: "Dropbox App Secret - leave blank to use rclone's.",
    }},
    })
    pflag.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}

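uploadChunkSize moves from a constant to an fs.SizeSuffix variable so it can be set with `--dropbox-chunk-size`: pflag.VarP accepts any type implementing the pflag.Value interface. Here is a self-contained sketch of that pattern with a toy size type - not rclone's actual fs.SizeSuffix implementation, which also parses k/M/G suffixes:

```
package main

import (
    "fmt"
    "strconv"

    "github.com/spf13/pflag"
)

// byteSize is a toy stand-in for fs.SizeSuffix: it satisfies
// pflag.Value (String, Set, Type) so it can back a flag.
type byteSize int64

func (b *byteSize) String() string { return strconv.FormatInt(int64(*b), 10) }

func (b *byteSize) Set(s string) error {
    // The real SizeSuffix also parses k/M/G suffixes; plain bytes here.
    n, err := strconv.ParseInt(s, 10, 64)
    *b = byteSize(n)
    return err
}

func (b *byteSize) Type() string { return "size" }

var chunkSize = byteSize(128 * 1024 * 1024)

func main() {
    pflag.VarP(&chunkSize, "dropbox-chunk-size", "", "Upload chunk size.")
    pflag.Parse()
    fmt.Println("chunk size:", chunkSize)
}
```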
// Configuration helper - called after the user has put in the defaults
@@ -99,6 +91,7 @@ func configHelper(name string) {

// FsDropbox represents a remote dropbox server
type FsDropbox struct {
    name      string           // name of this remote
    db        *dropbox.Dropbox // the connection to the dropbox server
    root      string           // the path we are working on
    slashRoot string           // root with "/" prefix, lowercase
@@ -116,6 +109,16 @@ type FsObjectDropbox struct {

// ------------------------------------------------------------

// The name of the remote (as passed into NewFs)
func (f *FsDropbox) Name() string {
    return f.name
}

// The root of the remote (as passed into NewFs)
func (f *FsDropbox) Root() string {
    return f.root
}

// String converts this FsDropbox to a string
func (f *FsDropbox) String() string {
    return fmt.Sprintf("Dropbox root '%s'", f.root)
@@ -131,7 +134,7 @@ func newDropbox(name string) *dropbox.Dropbox {
    }
    appSecret := fs.ConfigFile.MustValue(name, "app_secret")
    if appSecret == "" {
        appSecret = rcloneAppSecret
        appSecret = fs.Reveal(rcloneAppSecret)
    }

    db.SetAppInfo(appKey, appSecret)
@@ -141,15 +144,22 @@ func newDropbox(name string) *dropbox.Dropbox {

// NewFs constructs an FsDropbox from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
    if uploadChunkSize > maxUploadChunkSize {
        return nil, fmt.Errorf("Chunk size too big, must be < %v", maxUploadChunkSize)
    }
    db := newDropbox(name)
    f := &FsDropbox{
        db: db,
        name: name,
        db:   db,
    }
    f.setRoot(root)

    // Read the token from the config file
    token := fs.ConfigFile.MustValue(name, "token")

    // Set our custom context which enables our custom transport for timeouts etc
    db.SetContext(oauthutil.Context())

    // Authorize the client
    db.SetAccessToken(token)

@@ -402,6 +412,35 @@ func (f *FsDropbox) Precision() time.Duration {
    return fs.ModTimeNotSupported
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsDropbox) Copy(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*FsObjectDropbox)
    if !ok {
        fs.Debug(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }

    // Temporary FsObject under construction
    dstObj := &FsObjectDropbox{dropbox: f, remote: remote}

    srcPath := srcObj.remotePath()
    dstPath := dstObj.remotePath()
    entry, err := f.db.Copy(srcPath, dstPath, false)
    if err != nil {
        return nil, fmt.Errorf("Copy failed: %s", err)
    }
    dstObj.setMetadataFromEntry(entry)
    return dstObj, nil
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
@@ -413,6 +452,63 @@ func (f *FsDropbox) Purge() error {
    return err
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (dstFs *FsDropbox) Move(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*FsObjectDropbox)
    if !ok {
        fs.Debug(src, "Can't move - not same remote type")
        return nil, fs.ErrorCantMove
    }

    // Temporary FsObject under construction
    dstObj := &FsObjectDropbox{dropbox: dstFs, remote: remote}

    srcPath := srcObj.remotePath()
    dstPath := dstObj.remotePath()
    entry, err := dstFs.db.Move(srcPath, dstPath)
    if err != nil {
        return nil, fmt.Errorf("Move failed: %s", err)
    }
    dstObj.setMetadataFromEntry(entry)
    return dstObj, nil
}

// Move src to this remote using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (dstFs *FsDropbox) DirMove(src fs.Fs) error {
    srcFs, ok := src.(*FsDropbox)
    if !ok {
        fs.Debug(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
    }

    // Check if destination exists
    entry, err := dstFs.db.Metadata(dstFs.slashRoot, false, false, "", "", metadataLimit)
|
||||
if err == nil && !entry.IsDeleted {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
|
||||
// Do the move
|
||||
_, err = dstFs.db.Move(srcFs.slashRoot, dstFs.slashRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("MoveDir failed: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Return the parent Fs
|
||||
@@ -542,7 +638,12 @@ func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) error {
|
||||
entry, err := o.dropbox.db.UploadByChunk(ioutil.NopCloser(in), uploadChunkSize, o.remotePath(), true, "")
|
||||
remote := o.remotePath()
|
||||
if ignoredFiles.MatchString(remote) {
|
||||
fs.ErrorLog(o, "File name disallowed - not uploading")
|
||||
return nil
|
||||
}
|
||||
entry, err := o.dropbox.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Upload failed: %s", err)
|
||||
}
|
||||
@@ -557,6 +658,11 @@ func (o *FsObjectDropbox) Remove() error {
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var _ fs.Fs = &FsDropbox{}
|
||||
var _ fs.Purger = &FsDropbox{}
|
||||
var _ fs.Object = &FsObjectDropbox{}
|
||||
var (
|
||||
_ fs.Fs = (*FsDropbox)(nil)
|
||||
_ fs.Copier = (*FsDropbox)(nil)
|
||||
_ fs.Purger = (*FsDropbox)(nil)
|
||||
_ fs.Mover = (*FsDropbox)(nil)
|
||||
_ fs.DirMover = (*FsDropbox)(nil)
|
||||
_ fs.Object = (*FsObjectDropbox)(nil)
|
||||
)
|
||||
|
||||
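As a quick sanity check of the ignore pattern introduced above, this standalone sketch (not part of the diff) exercises the same regexp against a few names:

package main

import (
    "fmt"
    "regexp"
)

// Same pattern the dropbox backend uses to skip files Dropbox ignores.
var ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)

func main() {
    for _, name := range []string{"photos/Thumbs.db", "photos/holiday.jpg", ".dropbox"} {
        fmt.Printf("%-22q ignored=%v\n", name, ignoredFiles.MatchString(name))
    }
    // photos/Thumbs.db and .dropbox match; photos/holiday.jpg does not.
}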
@@ -1,7 +1,7 @@
// Test Dropbox filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
// Regenerate with: make gen_tests
package dropbox_test

import (
@@ -34,6 +34,9 @@ func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T)     { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T)   { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T)          { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T)          { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T)       { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T)     { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T)     { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T)    { fstests.TestObjectString(t) }
@@ -134,6 +134,13 @@ func (s *StatsInfo) ResetCounters() {
    s.transfers = 0
}

// ResetErrors sets the errors count to 0
func (s *StatsInfo) ResetErrors() {
    s.lock.RLock()
    defer s.lock.RUnlock()
    s.errors = 0
}

// Errored returns whether there have been any errors
func (s *StatsInfo) Errored() bool {
    s.lock.RLock()
fs/config.go

@@ -4,6 +4,7 @@ package fs

import (
    "bufio"
    "encoding/base64"
    "fmt"
    "log"
    "math"
@@ -18,7 +19,7 @@ import (

    "github.com/Unknwon/goconfig"
    "github.com/mreiferson/go-httpclient"
    "github.com/ogier/pflag"
    "github.com/spf13/pflag"
)

const (
@@ -49,6 +50,8 @@ var (
    dryRun         = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
    connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
    timeout        = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
    dumpHeaders    = pflag.BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
    dumpBodies     = pflag.BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
    bwLimit        SizeSuffix
)

@@ -113,9 +116,35 @@ func (x *SizeSuffix) Set(s string) error {
    return nil
}

// Type of the value
func (x *SizeSuffix) Type() string {
    return "int64"
}

// Check it satisfies the interface
var _ pflag.Value = (*SizeSuffix)(nil)

// Obscure a config value
func Obscure(x string) string {
    y := []byte(x)
    for i := range y {
        y[i] ^= byte(i) ^ 0xAA
    }
    return base64.StdEncoding.EncodeToString(y)
}

// Reveal a config value
func Reveal(y string) string {
    x, err := base64.StdEncoding.DecodeString(y)
    if err != nil {
        log.Fatalf("Failed to reveal %q: %v", y, err)
    }
    for i := range x {
        x[i] ^= byte(i) ^ 0xAA
    }
    return string(x)
}

// Filesystem config options
type ConfigInfo struct {
    Verbose bool
@@ -128,11 +157,13 @@ type ConfigInfo struct {
    Transfers      int
    ConnectTimeout time.Duration // Connect timeout
    Timeout        time.Duration // Data channel timeout
    DumpHeaders    bool
    DumpBodies     bool
}

// Transport returns an http.RoundTripper with the correct timeouts
func (ci *ConfigInfo) Transport() http.RoundTripper {
    return &httpclient.Transport{
    t := &httpclient.Transport{
        Proxy:               http.ProxyFromEnvironment,
        MaxIdleConnsPerHost: ci.Checkers + ci.Transfers + 1,

@@ -155,6 +186,10 @@ func (ci *ConfigInfo) Transport() http.RoundTripper {
        // Write operation on the request connection.
        ReadWriteTimeout: ci.Timeout,
    }
    if ci.DumpHeaders || ci.DumpBodies {
        return NewLoggedTransport(t, ci.DumpBodies)
    }
    return t
}

// Transport returns an http.Client with the correct timeouts
@@ -200,6 +235,8 @@ func LoadConfig() {
    Config.ConnectTimeout = *connectTimeout
    Config.CheckSum = *checkSum
    Config.SizeOnly = *sizeOnly
    Config.DumpHeaders = *dumpHeaders
    Config.DumpBodies = *dumpBodies

    ConfigPath = *configFile

@@ -55,3 +55,21 @@ func TestSizeSuffixSet(t *testing.T) {
        }
    }
}

func TestReveal(t *testing.T) {
    for _, test := range []struct {
        in   string
        want string
    }{
        {"", ""},
        {"2sTcyNrA", "potato"},
    } {
        got := Reveal(test.in)
        if got != test.want {
            t.Errorf("%q: want %q got %q", test.in, test.want, got)
        }
        if Obscure(got) != test.in {
            t.Errorf("%q: wasn't bidirectional", test.in)
        }
    }
}
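To make the obfuscation above concrete, here is a self-contained round trip of the same XOR-plus-base64 scheme, copied from the two functions in this diff (a sketch, not an rclone API call):

package main

import (
    "encoding/base64"
    "fmt"
    "log"
)

// Obscure XORs each byte with its index and 0xAA, then base64 encodes,
// exactly as in fs/config.go above.
func Obscure(x string) string {
    y := []byte(x)
    for i := range y {
        y[i] ^= byte(i) ^ 0xAA
    }
    return base64.StdEncoding.EncodeToString(y)
}

// Reveal reverses Obscure.
func Reveal(y string) string {
    x, err := base64.StdEncoding.DecodeString(y)
    if err != nil {
        log.Fatalf("Failed to reveal %q: %v", y, err)
    }
    for i := range x {
        x[i] ^= byte(i) ^ 0xAA
    }
    return string(x)
}

func main() {
    fmt.Println(Obscure("potato"))  // 2sTcyNrA, matching TestReveal above
    fmt.Println(Reveal("2sTcyNrA")) // potato
}

Note this is reversible obfuscation, not encryption - it only keeps secrets out of casual view of the config file.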
fs/fs.go

@@ -24,6 +24,10 @@ var (
    fsRegistry []*FsInfo
    // Error returned by NewFs if not found in config file
    NotFoundInConfigFile = fmt.Errorf("Didn't find section in config file")
    ErrorCantCopy        = fmt.Errorf("Can't copy object - incompatible remotes")
    ErrorCantMove        = fmt.Errorf("Can't move object - incompatible remotes")
    ErrorCantDirMove     = fmt.Errorf("Can't move directory - incompatible remotes")
    ErrorDirExists       = fmt.Errorf("Can't copy directory - destination already exists")
)

// Filesystem info
@@ -63,6 +67,12 @@ func Register(info *FsInfo) {

// A Filesystem, describes the local filesystem and the remote object store
type Fs interface {
    // The name of the remote (as passed into NewFs)
    Name() string

    // The root of the remote (as passed into NewFs)
    Root() string

    // String returns a description of the FS
    String() string

@@ -146,6 +156,43 @@ type Purger interface {
    Purge() error
}

type Copier interface {
    // Copy src to this remote using server side copy operations.
    //
    // This is stored with the remote path given
    //
    // It returns the destination Object and a possible error
    //
    // Will only be called if src.Fs().Name() == f.Name()
    //
    // If it isn't possible then return fs.ErrorCantCopy
    Copy(src Object, remote string) (Object, error)
}

type Mover interface {
    // Move src to this remote using server side move operations.
    //
    // This is stored with the remote path given
    //
    // It returns the destination Object and a possible error
    //
    // Will only be called if src.Fs().Name() == f.Name()
    //
    // If it isn't possible then return fs.ErrorCantMove
    Move(src Object, remote string) (Object, error)
}

type DirMover interface {
    // Move src to this remote using server side move operations.
    //
    // Will only be called if src.Fs().Name() == f.Name()
    //
    // If it isn't possible then return fs.ErrorCantDirMove
    //
    // If destination exists then return fs.ErrorDirExists
    DirMove(src Fs) error
}

// An optional interface for error as to whether the operation should be retried
//
// This should be returned from Update or Put methods as required
@@ -175,6 +222,24 @@ func RetryErrorf(format string, a ...interface{}) error {
    return retryError(fmt.Sprintf(format, a...))
}

// plainRetryError is an error wrapped so it will retry
type plainRetryError struct {
    error
}

// Retry interface
func (_ plainRetryError) Retry() bool {
    return true
}

// Check interface
var _ Retry = plainRetryError{(error)(nil)}

// RetryError makes an error which indicates it would like to be retried
func RetryError(err error) error {
    return plainRetryError{err}
}

// A channel of Objects
type ObjectsChan chan Object
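The retry plumbing above is small enough to demonstrate in isolation; this runnable sketch copies the diff's own types to show how a wrapped error signals "try again" to the retry loop in fs.Copy:

package main

import (
    "fmt"
)

// Minimal copies of the pieces from fs/fs.go above, for illustration only.
type Retry interface {
    error
    Retry() bool
}

type plainRetryError struct{ error }

func (plainRetryError) Retry() bool { return true }

// RetryError marks err as retryable, as in the diff.
func RetryError(err error) error { return plainRetryError{err} }

func main() {
    err := RetryError(fmt.Errorf("connection reset"))
    // This is the same type assertion fs.Copy performs before jumping
    // back to tryAgain.
    if r, ok := err.(Retry); ok && r.Retry() {
        fmt.Println("would retry:", err)
    }
}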
@@ -21,6 +21,16 @@ func NewLimited(fs Fs, objects ...Object) Fs {
    return f
}

// The name of the remote (as passed into NewFs)
func (f *Limited) Name() string {
    return f.fs.Name() // return name of underlying remote
}

// The root of the remote (as passed into NewFs)
func (f *Limited) Root() string {
    return f.fs.Root() // return root of underlying remote
}

// String returns a description of the FS
func (f *Limited) String() string {
    return fmt.Sprintf("%s limited to %d objects", f.fs.String(), len(f.objects))
@@ -76,7 +86,8 @@ func (f *Limited) Mkdir() error {

// Remove the directory (container, bucket) if empty
func (f *Limited) Rmdir() error {
    return fmt.Errorf("Can't rmdir in limited fs")
    // Ignore this in a limited fs
    return nil
}

// Precision of the ModTimes in this Fs
@@ -84,5 +95,23 @@ func (f *Limited) Precision() time.Duration {
    return f.fs.Precision()
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Limited) Copy(src Object, remote string) (Object, error) {
    fCopy, ok := f.fs.(Copier)
    if !ok {
        return nil, ErrorCantCopy
    }
    return fCopy.Copy(src, remote)
}

// Check the interfaces are satisfied
var _ Fs = &Limited{}
var _ Copier = &Limited{}
fs/loghttp.go (new file)

@@ -0,0 +1,54 @@
// A logging http transport

package fs

import (
    "log"
    "net/http"
    "net/http/httputil"
)

const separator = "------------------------------------------------------------"

// An http transport which logs the traffic
type loggedTransport struct {
    wrapped http.RoundTripper
    logBody bool
}

// NewLoggedTransport wraps the transport passed in and logs all roundtrips
// including the body if logBody is set.
func NewLoggedTransport(transport http.RoundTripper, logBody bool) *loggedTransport {
    return &loggedTransport{
        wrapped: transport,
        logBody: logBody,
    }
}

// CancelRequest cancels an in-flight request by closing its
// connection. CancelRequest should only be called after RoundTrip has
// returned.
func (t *loggedTransport) CancelRequest(req *http.Request) {
    if wrapped, ok := t.wrapped.(interface {
        CancelRequest(*http.Request)
    }); ok {
        log.Printf("CANCEL REQUEST %v", req)
        wrapped.CancelRequest(req)
    }
}

// RoundTrip implements the RoundTripper interface.
func (t *loggedTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
    buf, _ := httputil.DumpRequest(req, t.logBody)
    log.Println(separator)
    log.Println("HTTP REQUEST")
    log.Println(string(buf))
    log.Println(separator)
    resp, err = t.wrapped.RoundTrip(req)
    buf, _ = httputil.DumpResponse(resp, t.logBody)
    log.Println(separator)
    log.Println("HTTP RESPONSE")
    log.Println(string(buf))
    log.Println(separator)
    return resp, err
}
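Since NewLoggedTransport is exported, it can be used outside rclone's own Transport() wiring too; a minimal sketch of standalone usage (the target URL is illustrative):

package main

import (
    "log"
    "net/http"

    "github.com/ncw/rclone/fs"
)

func main() {
    // Wrap the default transport so every request and response is dumped;
    // logBody=true corresponds to the --dump-bodies behaviour above.
    client := &http.Client{
        Transport: fs.NewLoggedTransport(http.DefaultTransport, true),
    }
    resp, err := client.Get("https://example.com/") // illustrative URL
    if err != nil {
        log.Fatal(err)
    }
    resp.Body.Close()
}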
fs/operations.go

@@ -22,9 +22,13 @@ func CalculateModifyWindow(fs ...Fs) {
        if precision > Config.ModifyWindow {
            Config.ModifyWindow = precision
        }
        if precision == ModTimeNotSupported {
            Debug(f, "Modify window not supported")
            return
        }
    }
}
    Debug(fs[0], "Modify window is %s\n", Config.ModifyWindow)
    Debug(fs[0], "Modify window is %s", Config.ModifyWindow)
}

// Md5sumsEqual checks to see if src == dst, but ignores empty strings
@@ -37,25 +41,34 @@ func Md5sumsEqual(src, dst string) bool {

// Check the two files to see if the MD5sums are the same
//
// Returns two bools, the first of which is equality and the second of
// which is true if either of the MD5SUMs were unset.
//
// May return an error which will already have been logged
//
// If an error is returned it will return false
func CheckMd5sums(src, dst Object) (bool, error) {
// If an error is returned it will return equal as false
func CheckMd5sums(src, dst Object) (equal bool, unset bool, err error) {
    srcMd5, err := src.Md5sum()
    if err != nil {
        Stats.Error()
        ErrorLog(src, "Failed to calculate src md5: %s", err)
        return false, err
        return false, false, err
    }
    if srcMd5 == "" {
        return true, true, nil
    }
    dstMd5, err := dst.Md5sum()
    if err != nil {
        Stats.Error()
        ErrorLog(dst, "Failed to calculate dst md5: %s", err)
        return false, err
        return false, false, err
    }
    if dstMd5 == "" {
        return true, true, nil
    }
    // Debug("Src MD5 %s", srcMd5)
    // Debug("Dst MD5 %s", obj.Hash)
    return Md5sumsEqual(srcMd5, dstMd5), nil
    return Md5sumsEqual(srcMd5, dstMd5), false, nil
}

// Checks to see if the src and dst objects are equal by looking at
@@ -87,6 +100,10 @@ func Equal(src, dst Object) bool {

    var srcModTime time.Time
    if !Config.CheckSum {
        if Config.ModifyWindow == ModTimeNotSupported {
            Debug(src, "Sizes identical")
            return true
        }
        // Size the same so check the mtime
        srcModTime = src.ModTime()
        dstModTime := dst.ModTime()
@@ -102,7 +119,7 @@ func Equal(src, dst Object) bool {

    // mtime is unreadable or different but size is the same so
    // check the MD5SUM
    same, _ := CheckMd5sums(src, dst)
    same, md5unset, _ := CheckMd5sums(src, dst)
    if !same {
        Debug(src, "Md5sums differ")
        return false
@@ -114,7 +131,11 @@ func Equal(src, dst Object) bool {
        dst.SetModTime(srcModTime)
    }

    Debug(src, "Size and MD5SUM of src and dst objects identical")
    if md5unset {
        Debug(src, "Size of src and dst objects identical")
    } else {
        Debug(src, "Size and MD5SUM of src and dst objects identical")
    }
    return true
}

@@ -152,24 +173,40 @@ func Copy(f Fs, dst, src Object) {
    const maxTries = 10
    tries := 0
    doUpdate := dst != nil
    var err, inErr error
tryAgain:
    in0, err := src.Open()
    if err != nil {
        Stats.Error()
        ErrorLog(src, "Failed to open: %s", err)
        return
    }
    in := NewAccount(in0) // account the transfer

    var actionTaken string
    if doUpdate {
        actionTaken = "Copied (updated existing)"
        err = dst.Update(in, src.ModTime(), src.Size())
    // Try server side copy first - if has optional interface and
    // is same underlying remote
    actionTaken := "Copied (server side copy)"
    if fCopy, ok := f.(Copier); ok && src.Fs().Name() == f.Name() {
        var newDst Object
        newDst, err = fCopy.Copy(src, src.Remote())
        if err == nil {
            dst = newDst
        }
    } else {
        actionTaken = "Copied (new)"
        dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
        err = ErrorCantCopy
    }
    // If can't server side copy, do it manually
    if err == ErrorCantCopy {
        var in0 io.ReadCloser
        in0, err = src.Open()
        if err != nil {
            Stats.Error()
            ErrorLog(src, "Failed to open: %s", err)
            return
        }
        in := NewAccount(in0) // account the transfer

        if doUpdate {
            actionTaken = "Copied (updated existing)"
            err = dst.Update(in, src.ModTime(), src.Size())
        } else {
            actionTaken = "Copied (new)"
            dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
        }
        inErr = in.Close()
    }
    inErr := in.Close()
    // Retry if err returned a retry error
    if r, ok := err.(Retry); ok && r.Retry() && tries < maxTries {
        tries++
@@ -258,7 +295,7 @@ func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
}

// Read Objects on in and copy them
func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
func PairCopier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
    defer wg.Done()
    for pair := range in {
        src := pair.src
@@ -272,6 +309,35 @@ func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
    }
}

// Read Objects on in and move them if possible, or copy them if not
func PairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
    defer wg.Done()
    // See if we have Move available
    fdstMover, haveMover := fdst.(Mover)
    for pair := range in {
        src := pair.src
        dst := pair.dst
        Stats.Transferring(src)
        if Config.DryRun {
            Debug(src, "Not moving as --dry-run")
        } else if haveMover {
            // Delete destination if it exists
            if pair.dst != nil {
                err := dst.Remove()
                if err != nil {
                    Stats.Error()
                    ErrorLog(dst, "Couldn't delete: %s", err)
                }
            }
            fdstMover.Move(src, src.Remote())
            Debug(src, "Moved")
        } else {
            Copy(fdst, pair.dst, src)
        }
        Stats.DoneTransferring(src)
    }
}

// Delete all the files passed in the channel
func DeleteFiles(to_be_deleted ObjectsChan) {
    var wg sync.WaitGroup
@@ -314,10 +380,22 @@ func readFilesMap(fs Fs) map[string]Object {
    return files
}

// Returns true if fdst and fsrc point to the same underlying Fs
func FsSame(fdst, fsrc Fs) bool {
    return fdst.Name() == fsrc.Name() && fdst.Root() == fsrc.Root()
}

// Syncs fsrc into fdst
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc
func Sync(fdst, fsrc Fs, Delete bool) error {
//
// If DoMove is true then files will be moved instead of copied
func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
    if FsSame(fdst, fsrc) {
        ErrorLog(fdst, "Nothing to do as source and destination are the same")
        return nil
    }

    err := fdst.Mkdir()
    if err != nil {
        Stats.Error()
@@ -343,7 +421,11 @@ func Sync(fdst, fsrc Fs, Delete bool) error {
    var copierWg sync.WaitGroup
    copierWg.Add(Config.Transfers)
    for i := 0; i < Config.Transfers; i++ {
        go Copier(to_be_uploaded, fdst, &copierWg)
        if DoMove {
            go PairMover(to_be_uploaded, fdst, &copierWg)
        } else {
            go PairCopier(to_be_uploaded, fdst, &copierWg)
        }
    }

    go func() {
@@ -370,7 +452,7 @@ func Sync(fdst, fsrc Fs, Delete bool) error {
    // Delete files if asked
    if Delete {
        if Stats.Errored() {
            Log(fdst, "Not deleting files as there were IO errors")
            ErrorLog(fdst, "Not deleting files as there were IO errors")
            return nil
        }

@@ -387,6 +469,49 @@ func Sync(fdst, fsrc Fs, Delete bool) error {
    return nil
}

// Syncs fsrc into fdst
func Sync(fdst, fsrc Fs) error {
    return syncCopyMove(fdst, fsrc, true, false)
}

// Copies fsrc into fdst
func CopyDir(fdst, fsrc Fs) error {
    return syncCopyMove(fdst, fsrc, false, false)
}

// Moves fsrc into fdst
func MoveDir(fdst, fsrc Fs) error {
    if FsSame(fdst, fsrc) {
        ErrorLog(fdst, "Nothing to do as source and destination are the same")
        return nil
    }

    // First attempt to use DirMover
    if fdstDirMover, ok := fdst.(DirMover); ok && fsrc.Name() == fdst.Name() {
        err := fdstDirMover.DirMove(fsrc)
        Debug(fdst, "Using server side directory move")
        switch err {
        case ErrorCantDirMove, ErrorDirExists:
            Debug(fdst, "Server side directory move failed - fallback to copy/delete: %v", err)
        case nil:
            Debug(fdst, "Server side directory move succeeded")
            return nil
        default:
            Stats.Error()
            ErrorLog(fdst, "Server side directory move failed: %v", err)
            return err
        }
    }

    // Now move the files
    err := syncCopyMove(fdst, fsrc, false, true)
    if err != nil || Stats.Errored() {
        ErrorLog(fdst, "Not deleting files as there were IO errors")
        return err
    }
    return Purge(fsrc)
}

// Checks the files in fsrc and fdst according to Size and MD5SUM
func Check(fdst, fsrc Fs) error {
    Log(fdst, "Building file list")
@@ -444,7 +569,7 @@ func Check(fdst, fsrc Fs) error {
            ErrorLog(src, "Sizes differ")
            continue
        }
        same, err := CheckMd5sums(src, dst)
        same, _, err := CheckMd5sums(src, dst)
        Stats.DoneChecking(src)
        if err != nil {
            continue
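Seen from a caller, the three public entry points above now share one engine; a sketch under the assumption that fdst and fsrc were obtained from fs.NewFs (the combination itself is illustrative):

package example

import (
    "log"

    "github.com/ncw/rclone/fs"
)

func syncThenMove(fdst, fsrc fs.Fs) {
    // Sync copies changed files and deletes extraneous ones:
    // syncCopyMove(fdst, fsrc, true, false)
    if err := fs.Sync(fdst, fsrc); err != nil {
        log.Fatalf("sync: %v", err)
    }
    // CopyDir would copy without deleting: syncCopyMove(fdst, fsrc, false, false).
    // MoveDir tries a server side DirMove first, then falls back to
    // per-file moves followed by Purge(fsrc).
    if err := fs.MoveDir(fdst, fsrc); err != nil {
        log.Fatalf("move: %v", err)
    }
}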
@@ -20,6 +20,7 @@ import (
    "github.com/ncw/rclone/fstest"

    // Active file systems
    _ "github.com/ncw/rclone/amazonclouddrive"
    _ "github.com/ncw/rclone/drive"
    _ "github.com/ncw/rclone/dropbox"
    _ "github.com/ncw/rclone/googlecloudstorage"
@@ -98,7 +99,7 @@ func TestCopyWithDryRun(t *testing.T) {
    WriteFile("sub dir/hello world", "hello world", t1)

    fs.Config.DryRun = true
    err := fs.Sync(fremote, flocal, false)
    err := fs.CopyDir(fremote, flocal)
    fs.Config.DryRun = false
    if err != nil {
        t.Fatalf("Copy failed: %v", err)
@@ -114,7 +115,7 @@ func TestCopyWithDryRun(t *testing.T) {

// Now without dry run
func TestCopy(t *testing.T) {
    err := fs.Sync(fremote, flocal, false)
    err := fs.CopyDir(fremote, flocal)
    if err != nil {
        t.Fatalf("Copy failed: %v", err)
    }
@@ -127,6 +128,28 @@ func TestCopy(t *testing.T) {
    fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Test a server side copy if possible, or the backup path if not
func TestServerSideCopy(t *testing.T) {
    fremoteCopy, finaliseCopy, err := fstest.RandomRemote(*RemoteName, *SubDir)
    if err != nil {
        t.Fatalf("Failed to open remote copy %q: %v", *RemoteName, err)
    }
    defer finaliseCopy()
    t.Logf("Server side copy (if possible) %v -> %v", fremote, fremoteCopy)

    err = fs.CopyDir(fremoteCopy, fremote)
    if err != nil {
        t.Fatalf("Server Side Copy failed: %v", err)
    }

    items := []fstest.Item{
        {Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
    }

    fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
    fstest.CheckListingWithPrecision(t, fremoteCopy, items, fs.Config.ModifyWindow)
}

func TestLsd(t *testing.T) {
    var buf bytes.Buffer
    err := fs.ListDir(fremote, &buf)
@@ -154,7 +177,7 @@ func TestCopyAfterDelete(t *testing.T) {
}

func TestCopyRedownload(t *testing.T) {
    err := fs.Sync(flocal, fremote, false)
    err := fs.CopyDir(flocal, fremote)
    if err != nil {
        t.Fatalf("Copy failed: %v", err)
    }
@@ -184,7 +207,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
    fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

    fs.Stats.ResetCounters()
    err := fs.Sync(fremote, flocal, true)
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Initial sync failed: %v", err)
    }
@@ -207,7 +230,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
    fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

    fs.Stats.ResetCounters()
    err = fs.Sync(fremote, flocal, true)
    err = fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
@@ -238,7 +261,7 @@ func TestSyncSizeOnly(t *testing.T) {
    fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

    fs.Stats.ResetCounters()
    err := fs.Sync(fremote, flocal, true)
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Initial sync failed: %v", err)
    }
@@ -259,7 +282,7 @@ func TestSyncSizeOnly(t *testing.T) {
    fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

    fs.Stats.ResetCounters()
    err = fs.Sync(fremote, flocal, true)
    err = fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
@@ -282,7 +305,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
    if err != nil {
        t.Fatalf("Chtimes failed: %v", err)
    }
    err = fs.Sync(fremote, flocal, true)
    err = fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
@@ -295,7 +318,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {

func TestSyncAfterAddingAFile(t *testing.T) {
    WriteFile("potato", "------------------------------------------------------------", t3)
    err := fs.Sync(fremote, flocal, true)
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
@@ -309,7 +332,7 @@ func TestSyncAfterAddingAFile(t *testing.T) {

func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
    WriteFile("potato", "smaller but same date", t3)
    err := fs.Sync(fremote, flocal, true)
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
@@ -323,8 +346,16 @@ func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {

// Sync after changing a file's contents, modtime but not length
func TestSyncAfterChangingContentsOnly(t *testing.T) {
    if fremote.Precision() == fs.ModTimeNotSupported {
        t.Logf("ModTimeNotSupported so forcing file to be a different size")
        WriteFile("potato", "different size to make sure it syncs", t2)
        err := fs.Sync(fremote, flocal)
        if err != nil {
            t.Fatalf("Sync failed: %v", err)
        }
    }
    WriteFile("potato", "SMALLER BUT SAME DATE", t2)
    err := fs.Sync(fremote, flocal, true)
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
@@ -344,7 +375,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
        t.Fatalf("Remove failed: %v", err)
    }
    fs.Config.DryRun = true
    err = fs.Sync(fremote, flocal, true)
    err = fs.Sync(fremote, flocal)
    fs.Config.DryRun = false
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
@@ -364,7 +395,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {

// Sync after removing a file and adding a file
func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
    err := fs.Sync(fremote, flocal, true)
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
@@ -376,6 +407,55 @@ func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
    fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Test a server side move if possible, or the backup path if not
func TestServerSideMove(t *testing.T) {
    fremoteMove, finaliseMove, err := fstest.RandomRemote(*RemoteName, *SubDir)
    if err != nil {
        t.Fatalf("Failed to open remote move %q: %v", *RemoteName, err)
    }
    defer finaliseMove()
    t.Logf("Server side move (if possible) %v -> %v", fremote, fremoteMove)

    // Start with a copy
    err = fs.CopyDir(fremoteMove, fremote)
    if err != nil {
        t.Fatalf("Server Side Copy failed: %v", err)
    }

    // Remove one file
    obj := fremoteMove.NewFsObject("potato2")
    if obj == nil {
        t.Fatalf("Failed to find potato2")
    }
    err = obj.Remove()
    if err != nil {
        t.Fatalf("Failed to remove object: %v", err)
    }

    // Do server side move
    err = fs.MoveDir(fremoteMove, fremote)
    if err != nil {
        t.Fatalf("Server Side Move failed: %v", err)
    }

    items := []fstest.Item{
        {Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
        {Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
    }

    fstest.CheckListingWithPrecision(t, fremote, items[:0], fs.Config.ModifyWindow)
    fstest.CheckListingWithPrecision(t, fremoteMove, items, fs.Config.ModifyWindow)

    // Move it back again, dst does not exist this time
    err = fs.MoveDir(fremote, fremoteMove)
    if err != nil {
        t.Fatalf("Server Side Move 2 failed: %v", err)
    }

    fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
    fstest.CheckListingWithPrecision(t, fremoteMove, items[:0], fs.Config.ModifyWindow)
}

func TestLs(t *testing.T) {
    var buf bytes.Buffer
    err := fs.List(fremote, &buf)
@@ -8,6 +8,11 @@ TestS3:
TestDrive:
TestGoogleCloudStorage:
TestDropbox:
TestAmazonCloudDrive:
"

REMOTES="
TestAmazonCloudDrive:
"

function test_remote {
@@ -1,3 +1,3 @@
package fs

const Version = "v1.18"
const Version = "v1.20"
@@ -28,6 +28,7 @@ type Item struct {
    Md5sum  string
    ModTime time.Time
    Size    int64
    WinPath string
}

// Checks the times are equal within the precision, returns the delta and a flag
@@ -67,19 +68,22 @@ func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {

// Represents all items for checking
type Items struct {
    byName map[string]*Item
    items  []Item
    byName    map[string]*Item
    byNameAlt map[string]*Item
    items     []Item
}

// Make an Items
func NewItems(items []Item) *Items {
    is := &Items{
        byName: make(map[string]*Item),
        items:  items,
        byName:    make(map[string]*Item),
        byNameAlt: make(map[string]*Item),
        items:     items,
    }
    // Fill up byName
    for i := range items {
        is.byName[items[i].Path] = &items[i]
        is.byNameAlt[items[i].WinPath] = &items[i]
    }
    return is
}
@@ -88,10 +92,14 @@ func NewItems(items []Item) *Items {
func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
    i, ok := is.byName[obj.Remote()]
    if !ok {
        t.Errorf("Unexpected file %q", obj.Remote())
        return
        i, ok = is.byNameAlt[obj.Remote()]
        if !ok {
            t.Errorf("Unexpected file %q", obj.Remote())
            return
        }
    }
    delete(is.byName, obj.Remote())
    delete(is.byName, i.Path)
    delete(is.byName, i.WinPath)
    i.Check(t, obj, precision)
}

@@ -108,7 +116,20 @@ func (is *Items) Done(t *testing.T) {
// Checks the fs to see if it has the expected contents
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision time.Duration) {
    is := NewItems(items)
    for obj := range f.List() {
    oldErrors := fs.Stats.GetErrors()
    var objs []fs.Object
    for i := 1; i <= 5; i++ {
        objs = nil
        for obj := range f.List() {
            objs = append(objs, obj)
        }
        if len(objs) == len(items) {
            break
        }
        t.Logf("Sleeping for 1 second for list eventual consistency: %d/5", i)
        time.Sleep(1 * time.Second)
    }
    for _, obj := range objs {
        if obj == nil {
            t.Errorf("Unexpected nil in List()")
            continue
@@ -116,6 +137,10 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision ti
        is.Find(t, obj, precision)
    }
    is.Done(t)
    // Don't notice an error when listing an empty directory
    if len(items) == 0 && oldErrors == 0 && fs.Stats.GetErrors() == 1 {
        fs.Stats.ResetErrors()
    }
}

// Checks the fs to see if it has the expected contents
@@ -1,4 +1,8 @@
// Generic tests for testing the Fs and Object interfaces
//
// Run go generate to write the tests for the remotes

//go:generate go run gen_tests.go
package fstests

import (
@@ -30,6 +34,7 @@ var (
    file2 = fstest.Item{
        ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
        Path:    `hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`,
        WinPath: `hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ _ _/z.txt`,
    }
)

@@ -165,7 +170,7 @@ func TestFsListDirFile2(t *testing.T) {
    skipIfNotOk(t)
    found := false
    for obj := range remote.ListDir() {
        if obj.Name != `hello? sausage` {
        if obj.Name != `hello? sausage` && obj.Name != `hello_ sausage` {
            t.Errorf("Found unexpected item %q", obj.Name)
        } else {
            found = true
@@ -201,17 +206,18 @@ func TestFsListRoot(t *testing.T) {
    }
    // Should either find file1 and file2 or nothing
    found1 := false
    file1 := subRemoteLeaf + "/" + file1.Path
    f1 := subRemoteLeaf + "/" + file1.Path
    found2 := false
    file2 := subRemoteLeaf + "/" + file2.Path
    f2 := subRemoteLeaf + "/" + file2.Path
    f2Alt := subRemoteLeaf + "/" + file2.WinPath
    count := 0
    errors := fs.Stats.GetErrors()
    for obj := range rootRemote.List() {
        count++
        if obj.Remote() == file1 {
        if obj.Remote() == f1 {
            found1 = true
        }
        if obj.Remote() == file2 {
        if obj.Remote() == f2 || obj.Remote() == f2Alt {
            found2 = true
        }
    }
@@ -228,7 +234,7 @@ func TestFsListRoot(t *testing.T) {
        }
        return
    }
    t.Errorf("Didn't find %q (%v) and %q (%v) or no files (count %d)", file1, found1, file2, found2, count)
    t.Errorf("Didn't find %q (%v) and %q (%v) or no files (count %d)", f1, found1, f2, found2, count)
}

func TestFsListFile1(t *testing.T) {
@@ -247,6 +253,130 @@ func TestFsListFile1and2(t *testing.T) {
    fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}

func TestFsCopy(t *testing.T) {
    skipIfNotOk(t)

    // Check have Copy
    _, ok := remote.(fs.Copier)
    if !ok {
        t.Skip("FS has no Copier interface")
    }

    var file1Copy = file1
    file1Copy.Path += "-copy"

    // do the copy
    src := findObject(t, file1.Path)
    dst, err := remote.(fs.Copier).Copy(src, file1Copy.Path)
    if err != nil {
        t.Errorf("Copy failed: %v", err)
    }

    // check file exists in new listing
    fstest.CheckListing(t, remote, []fstest.Item{file1, file2, file1Copy})

    // Check dst lightly - list above has checked ModTime/Md5sum
    if dst.Remote() != file1Copy.Path {
        t.Errorf("object path: want %q got %q", file1Copy.Path, dst.Remote())
    }

    // Delete copy
    err = dst.Remove()
    if err != nil {
        t.Fatal("Remove copy error", err)
    }

}

func TestFsMove(t *testing.T) {
    skipIfNotOk(t)

    // Check have Move
    _, ok := remote.(fs.Mover)
    if !ok {
        t.Skip("FS has no Mover interface")
    }

    var file1Move = file1
    file1Move.Path += "-move"

    // do the move
    src := findObject(t, file1.Path)
    dst, err := remote.(fs.Mover).Move(src, file1Move.Path)
    if err != nil {
        t.Fatalf("Move failed: %v", err)
    }

    // check file exists in new listing
    fstest.CheckListing(t, remote, []fstest.Item{file2, file1Move})

    // Check dst lightly - list above has checked ModTime/Md5sum
    if dst.Remote() != file1Move.Path {
        t.Errorf("object path: want %q got %q", file1Move.Path, dst.Remote())
    }

    // move it back
    src = findObject(t, file1Move.Path)
    _, err = remote.(fs.Mover).Move(src, file1.Path)
    if err != nil {
        t.Errorf("Move failed: %v", err)
    }

    // check file exists in new listing
    fstest.CheckListing(t, remote, []fstest.Item{file2, file1})
}

// Move src to this remote using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func TestFsDirMove(t *testing.T) {
    skipIfNotOk(t)

    // Check have DirMove
    _, ok := remote.(fs.DirMover)
    if !ok {
        t.Skip("FS has no DirMover interface")
    }

    // Check it can't move onto itself
    err := remote.(fs.DirMover).DirMove(remote)
    if err != fs.ErrorDirExists {
        t.Errorf("Expecting fs.ErrorDirExists got: %v", err)
    }

    // new remote
    newRemote, removeNewRemote, err := fstest.RandomRemote(RemoteName, false)
    if err != nil {
        t.Fatalf("Failed to create remote: %v", err)
    }
    defer removeNewRemote()

    // try the move
    err = newRemote.(fs.DirMover).DirMove(remote)
    if err != nil {
        t.Errorf("Failed to DirMove: %v", err)
    }

    // check remotes
    // FIXME: Prints errors.
    fstest.CheckListing(t, remote, []fstest.Item{})
    fstest.CheckListing(t, newRemote, []fstest.Item{file2, file1})

    // move it back
    err = remote.(fs.DirMover).DirMove(newRemote)
    if err != nil {
        t.Errorf("Failed to DirMove: %v", err)
    }

    // check remotes
    fstest.CheckListing(t, remote, []fstest.Item{file2, file1})
    fstest.CheckListing(t, newRemote, []fstest.Item{})
}

func TestFsRmdirFull(t *testing.T) {
    skipIfNotOk(t)
    err := remote.Rmdir()
@@ -75,24 +75,17 @@ func init() {
`

// Generate test file piping it through gofmt
func generateTestProgram(t *template.Template, fns []string, Fsname string) {
func generateTestProgram(t *template.Template, fns []string, Fsname, ObjectName string) {
    fsname := strings.ToLower(Fsname)
    TestName := "Test" + Fsname + ":"
    outfile := "../../" + fsname + "/" + fsname + "_test.go"
    // Find last capitalised group to be object name
    matcher := regexp.MustCompile(`([A-Z][a-z0-9]+)$`)
    matches := matcher.FindStringSubmatch(Fsname)
    if len(matches) == 0 {
        log.Fatalf("Couldn't find object name in %q", Fsname)
    }
    ObjectName := matches[1]

    if fsname == "local" {
        TestName = ""
    }

    data := Data{
        Regenerate: "go run gen_tests.go or make gen_tests",
        Regenerate:  "make gen_tests",
        FsName:      fsname,
        UpperFsName: Fsname,
        TestName:    TestName,
@@ -133,11 +126,12 @@ func generateTestProgram(t *template.Template, fns []string, Fsname string) {
func main() {
    fns := findTestFunctions()
    t := template.Must(template.New("main").Parse(testProgram))
    generateTestProgram(t, fns, "Local")
    generateTestProgram(t, fns, "Swift")
    generateTestProgram(t, fns, "S3")
    generateTestProgram(t, fns, "Drive")
    generateTestProgram(t, fns, "GoogleCloudStorage")
    generateTestProgram(t, fns, "Dropbox")
    generateTestProgram(t, fns, "Local", "Local")
    generateTestProgram(t, fns, "Swift", "Swift")
    generateTestProgram(t, fns, "S3", "S3")
    generateTestProgram(t, fns, "Drive", "Drive")
    generateTestProgram(t, fns, "GoogleCloudStorage", "Storage")
    generateTestProgram(t, fns, "Dropbox", "Dropbox")
    generateTestProgram(t, fns, "AmazonCloudDrive", "Acd")
    log.Printf("Done")
}
@@ -1,137 +0,0 @@
// Common authentication between Google Drive and Google Cloud Storage
package googleauth

import (
    "encoding/json"
    "fmt"
    "log"

    "code.google.com/p/goauth2/oauth"
    "github.com/ncw/rclone/fs"
)

// A token cache to save the token in the config file section named
type TokenCache string

// Get the token from the config file - returns an error if it isn't present
func (name TokenCache) Token() (*oauth.Token, error) {
    tokenString, err := fs.ConfigFile.GetValue(string(name), "token")
    if err != nil {
        return nil, err
    }
    if tokenString == "" {
        return nil, fmt.Errorf("Empty token found - please reconfigure")
    }
    token := new(oauth.Token)
    err = json.Unmarshal([]byte(tokenString), token)
    if err != nil {
        return nil, err
    }
    return token, nil

}

// Save the token to the config file
//
// This saves the config file if it changes
func (name TokenCache) PutToken(token *oauth.Token) error {
    tokenBytes, err := json.Marshal(token)
    if err != nil {
        return err
    }
    tokenString := string(tokenBytes)
    old := fs.ConfigFile.MustValue(string(name), "token")
    if tokenString != old {
        fs.ConfigFile.SetValue(string(name), "token", tokenString)
        fs.SaveConfig()
    }
    return nil
}

// Auth contains information to authenticate an app against google services
type Auth struct {
    Scope               string
    DefaultClientId     string
    DefaultClientSecret string
}

// Makes a new transport using authorisation from the config
//
// Doesn't have a token yet
func (auth *Auth) newTransport(name string) (*oauth.Transport, error) {
    clientId := fs.ConfigFile.MustValue(name, "client_id")
    if clientId == "" {
        clientId = auth.DefaultClientId
    }
    clientSecret := fs.ConfigFile.MustValue(name, "client_secret")
    if clientSecret == "" {
        clientSecret = auth.DefaultClientSecret
    }

    // Settings for authorization.
    var config = &oauth.Config{
        ClientId:     clientId,
        ClientSecret: clientSecret,
        Scope:        auth.Scope,
        RedirectURL:  "urn:ietf:wg:oauth:2.0:oob",
        AuthURL:      "https://accounts.google.com/o/oauth2/auth",
        TokenURL:     "https://accounts.google.com/o/oauth2/token",
        TokenCache:   TokenCache(name),
    }

    t := &oauth.Transport{
        Config:    config,
        Transport: fs.Config.Transport(),
    }

    return t, nil
}

// Makes a new transport using authorisation from the config with token
func (auth *Auth) NewTransport(name string) (*oauth.Transport, error) {
    t, err := auth.newTransport(name)
    if err != nil {
        return nil, err
    }

    // Try to pull the token from the cache; if this fails, we need to get one.
    token, err := t.Config.TokenCache.Token()
    if err != nil {
        return nil, fmt.Errorf("Failed to get token: %s", err)
    }
    t.Token = token

    return t, nil
}

// Configuration helper - called after the user has put in the defaults
func (auth *Auth) Config(name string) {
    // See if already have a token
    tokenString := fs.ConfigFile.MustValue(name, "token")
    if tokenString != "" {
        fmt.Printf("Already have a token - refresh?\n")
        if !fs.Confirm() {
            return
        }
    }

    // Get a transport
    t, err := auth.newTransport(name)
    if err != nil {
        log.Fatalf("Couldn't make transport: %v", err)
    }

    // Generate a URL for the user to visit for authorization.
    authUrl := t.Config.AuthCodeURL("state")
    fmt.Printf("Go to the following link in your browser\n")
    fmt.Printf("%s\n", authUrl)
    fmt.Printf("Log in, then type paste the token that is returned in the browser here\n")

    // Read the code, and exchange it for a token.
    fmt.Printf("Enter verification code> ")
    authCode := fs.ReadLine()
    _, err = t.Exchange(authCode)
    if err != nil {
        log.Fatalf("Failed to get token: %v", err)
    }
}
@@ -17,22 +17,25 @@ import (
    "encoding/hex"
    "fmt"
    "io"
    "log"
    "net/http"
    "path"
    "regexp"
    "strings"
    "time"

    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
    "google.golang.org/api/googleapi"
    "google.golang.org/api/storage/v1"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/googleauth"
    "github.com/ncw/rclone/oauthutil"
)

const (
    rcloneClientId     = "202264815644.apps.googleusercontent.com"
    rcloneClientSecret = "X4Z3ca8xfWDb1Voo-F9a7ZxJ"
    rcloneClientID     = "202264815644.apps.googleusercontent.com"
    rcloneClientSecret = "8p/yms3OlNXE9OTDl/HLypf9gdiJ5cT3"
    timeFormatIn       = time.RFC3339
    timeFormatOut      = "2006-01-02T15:04:05.000000000Z07:00"
    metaMtime          = "mtime" // key to store mtime under in metadata
@@ -41,10 +44,12 @@ const (

var (
    // Description of how to auth for this app
    storageAuth = &googleauth.Auth{
        Scope:               storage.DevstorageFullControlScope,
        DefaultClientId:     rcloneClientId,
        DefaultClientSecret: rcloneClientSecret,
    storageConfig = &oauth2.Config{
        Scopes:       []string{storage.DevstorageFullControlScope},
        Endpoint:     google.Endpoint,
        ClientID:     rcloneClientID,
        ClientSecret: fs.Reveal(rcloneClientSecret),
        RedirectURL:  oauthutil.TitleBarRedirectURL,
    }
)

@@ -54,7 +59,10 @@ func init() {
        Name:  "google cloud storage",
        NewFs: NewFs,
        Config: func(name string) {
            storageAuth.Config(name)
            err := oauthutil.Config(name, storageConfig)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
        },
        Options: []fs.Option{{
            Name: "client_id",
@@ -112,6 +120,7 @@ func init() {

// FsStorage represents a remote storage server
type FsStorage struct {
    name          string           // name of this remote
    svc           *storage.Service // the connection to the storage server
    client        *http.Client     // authorized client
    bucket        string           // the bucket we are working on
@@ -135,6 +144,19 @@ type FsObjectStorage struct {

// ------------------------------------------------------------

// The name of the remote (as passed into NewFs)
func (f *FsStorage) Name() string {
    return f.name
}

// The root of the remote (as passed into NewFs)
func (f *FsStorage) Root() string {
    if f.root == "" {
        return f.bucket
    }
    return f.bucket + "/" + f.root
}

// String converts this FsStorage to a string
func (f *FsStorage) String() string {
    if f.root == "" {
@@ -160,9 +182,9 @@ func parsePath(path string) (bucket, directory string, err error) {

// NewFs contstructs an FsStorage from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
    t, err := storageAuth.NewTransport(name)
    oAuthClient, err := oauthutil.NewClient(name, storageConfig)
    if err != nil {
        return nil, err
        log.Fatalf("Failed to configure Google Cloud Storage: %v", err)
    }

    bucket, directory, err := parsePath(root)
@@ -171,6 +193,7 @@ func NewFs(name, root string) (fs.Fs, error) {
    }

    f := &FsStorage{
        name:          name,
        bucket:        bucket,
        root:          directory,
        projectNumber: fs.ConfigFile.MustValue(name, "project_number"),
@@ -185,7 +208,7 @@ func NewFs(name, root string) (fs.Fs, error) {
    }

    // Create a new authorized Drive client.
    f.client = t.Client()
    f.client = oAuthClient
    f.svc, err = storage.New(f.client)
    if err != nil {
        return nil, fmt.Errorf("Couldn't create Google Cloud Storage client: %s", err)
@@ -394,6 +417,38 @@ func (fs *FsStorage) Precision() time.Duration {
    return time.Nanosecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsStorage) Copy(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*FsObjectStorage)
    if !ok {
        fs.Debug(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }

    // Temporary FsObject under construction
    dstObj := &FsObjectStorage{storage: f, remote: remote}

    srcBucket := srcObj.storage.bucket
    srcObject := srcObj.storage.root + srcObj.remote
    dstBucket := f.bucket
    dstObject := f.root + remote
    newObject, err := f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
    if err != nil {
        return nil, err
    }
    // Set the metadata for the new object while we have it
    dstObj.setMetaData(newObject)
    return dstObj, nil
}

// ------------------------------------------------------------

// Return the parent Fs
@@ -571,4 +626,5 @@ func (o *FsObjectStorage) Remove() error {

// Check the interfaces are satisfied
var _ fs.Fs = &FsStorage{}
var _ fs.Copier = &FsStorage{}
var _ fs.Object = &FsObjectStorage{}
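For orientation, oauthutil.NewClient ultimately hands the saved token to the standard golang.org/x/oauth2 machinery; a minimal sketch of the same idea using that package directly (token acquisition elided, credential values illustrative):

package main

import (
    "fmt"

    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
)

func main() {
    conf := &oauth2.Config{
        Scopes:       []string{"https://www.googleapis.com/auth/devstorage.full_control"},
        Endpoint:     google.Endpoint,
        ClientID:     "CLIENT_ID",     // illustrative - rclone reads these from its config
        ClientSecret: "CLIENT_SECRET", // illustrative
        RedirectURL:  "urn:ietf:wg:oauth:2.0:oob",
    }
    // With a previously saved token, Client returns an *http.Client that
    // refreshes the token as needed - this is what NewFs hands to storage.New.
    token := &oauth2.Token{RefreshToken: "SAVED_REFRESH_TOKEN"} // illustrative
    httpClient := conf.Client(oauth2.NoContext, token)
    fmt.Printf("%T\n", httpClient)
}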
@@ -1,7 +1,7 @@
// Test GoogleCloudStorage filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
// Regenerate with: make gen_tests
package googlecloudstorage_test

import (
@@ -34,6 +34,9 @@ func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T)     { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T)   { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T)          { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T)          { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T)       { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T)     { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T)     { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T)    { fstests.TestObjectString(t) }
local/local.go (242 lines changed)
@@ -1,11 +1,6 @@
// Local filesystem interface
package local

// Note that all rclone paths should be / separated. Anything coming
// from the filepath module will have \ separators on windows so
// should be converted using filepath.ToSlash. Windows is quite happy
// with / separators so there is no need to convert them back.

import (
	"crypto/md5"
	"encoding/hex"

@@ -14,13 +9,15 @@ import (
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"sync"
	"time"
	"unicode/utf8"

	"github.com/ncw/rclone/fs"
	"regexp"
	"runtime"
	"strings"
)

// Register with Fs

@@ -33,6 +30,7 @@ func init() {

// FsLocal represents a local filesystem rooted at root
type FsLocal struct {
	name        string        // the name of the remote
	root        string        // The root directory
	precisionOk sync.Once     // Whether we need to read the precision
	precision   time.Duration // precision of local filesystem

@@ -50,19 +48,22 @@ type FsObjectLocal struct {

// ------------------------------------------------------------

// NewFs contstructs an FsLocal from the path
// NewFs constructs an FsLocal from the path
func NewFs(name, root string) (fs.Fs, error) {
	root = filepath.ToSlash(path.Clean(root))
	var err error

	f := &FsLocal{
		root:   root,
		name:   name,
		warned: make(map[string]struct{}),
	}
	f.root = filterPath(f.cleanUtf8(root))

	// Check to see if this points to a file
	fi, err := os.Lstat(f.root)
	if err == nil && fi.Mode().IsRegular() {
		// It is a file, so use the parent as the root
		remote := path.Base(root)
		f.root = path.Dir(root)
		var remote string
		f.root, remote = getDirFile(f.root)
		obj := f.NewFsObject(remote)
		// return a Fs Limited to this object
		return fs.NewLimited(f, obj), nil

@@ -70,24 +71,39 @@ func NewFs(name, root string) (fs.Fs, error) {
	return f, nil
}

// The name of the remote (as passed into NewFs)
func (f *FsLocal) Name() string {
	return f.name
}

// The root of the remote (as passed into NewFs)
func (f *FsLocal) Root() string {
	return f.root
}

// String converts this FsLocal to a string
func (f *FsLocal) String() string {
	return fmt.Sprintf("Local file system at %s", f.root)
}

// newFsObject makes a half completed FsObjectLocal
func (f *FsLocal) newFsObject(remote string) *FsObjectLocal {
	remote = filepath.ToSlash(remote)
	dstPath := filterPath(filepath.Join(f.root, f.cleanUtf8(remote)))
	return &FsObjectLocal{local: f, remote: remote, path: dstPath}
}

// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
	remote = filepath.ToSlash(remote)
	path := path.Join(f.root, remote)
	o := &FsObjectLocal{local: f, remote: remote, path: path}
	o := f.newFsObject(remote)
	if info != nil {
		o.info = info
	} else {
		err := o.lstat()
		if err != nil {
			fs.Debug(o, "Failed to stat %s: %s", path, err)
			fs.Debug(o, "Failed to stat %s: %s", o.path, err)
			return nil
		}
	}

@@ -143,14 +159,40 @@ func (f *FsLocal) List() fs.ObjectsChan {
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
func (f *FsLocal) cleanUtf8(name string) string {
	if utf8.ValidString(name) {
		return name
	if !utf8.ValidString(name) {
		if _, ok := f.warned[name]; !ok {
			fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
			f.warned[name] = struct{}{}
		}
		name = string([]rune(name))
	}
	if _, ok := f.warned[name]; !ok {
		fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
		f.warned[name] = struct{}{}
	if runtime.GOOS == "windows" {
		var name2 string
		if strings.HasPrefix(name, `\\?\`) {
			name2 = `\\?\`
			name = strings.TrimPrefix(name, `\\?\`)
		}
		if strings.HasPrefix(name, `//?/`) {
			name2 = `//?/`
			name = strings.TrimPrefix(name, `//?/`)
		}
		name2 += strings.Map(func(r rune) rune {
			switch r {
			case '<', '>', '"', '|', '?', '*', '&':
				return '_'
			}
			return r
		}, name)

		if name2 != name {
			if _, ok := f.warned[name]; !ok {
				fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
				f.warned[name] = struct{}{}
			}
			name = name2
		}
	}
	return string([]rune(name))
	return name
}

// Walk the path returning a channel of FsObjects

@@ -172,7 +214,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
			Count: 0,
		}
		// Go down the tree to count the files and directories
		dirpath := path.Join(f.root, item.Name())
		dirpath := filterPath(filepath.Join(f.root, item.Name()))
		err := filepath.Walk(dirpath, func(path string, fi os.FileInfo, err error) error {
			if err != nil {
				fs.Stats.Error()

@@ -198,9 +240,8 @@ func (f *FsLocal) ListDir() fs.DirChan {

// Puts the FsObject to the local filesystem
func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	dstPath := path.Join(f.root, remote)
	// Temporary FsObject under construction - info filled in by Update()
	o := &FsObjectLocal{local: f, remote: remote, path: dstPath}
	o := f.newFsObject(remote)
	err := o.Update(in, modTime, size)
	if err != nil {
		return nil, err

@@ -210,6 +251,7 @@ func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64

// Mkdir creates the directory if it doesn't exist
func (f *FsLocal) Mkdir() error {
	// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
	return os.MkdirAll(f.root, 0777)
}

@@ -296,6 +338,90 @@ func (f *FsLocal) Purge() error {
	return os.RemoveAll(f.root)
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (dstFs *FsLocal) Move(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*FsObjectLocal)
	if !ok {
		fs.Debug(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Temporary FsObject under construction
	dstObj := dstFs.newFsObject(remote)

	// Check it is a file if it exists
	err := dstObj.lstat()
	if os.IsNotExist(err) {
		// OK
	} else if err != nil {
		return nil, err
	} else if !dstObj.info.Mode().IsRegular() {
		// It isn't a file
		return nil, fmt.Errorf("Can't move file onto non-file")
	}

	// Create destination
	err = dstObj.mkdirAll()
	if err != nil {
		return nil, err
	}

	// Do the move
	err = os.Rename(srcObj.path, dstObj.path)
	if err != nil {
		return nil, err
	}

	// Update the info
	err = dstObj.lstat()
	if err != nil {
		return nil, err
	}

	return dstObj, nil
}

// Move src directory to this remote using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (dstFs *FsLocal) DirMove(src fs.Fs) error {
	srcFs, ok := src.(*FsLocal)
	if !ok {
		fs.Debug(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	// Check if source exists
	sstat, err := os.Lstat(srcFs.root)
	if err != nil {
		return err
	}
	// And is a directory
	if !sstat.IsDir() {
		return fs.ErrorCantDirMove
	}

	// Check if destination exists
	_, err = os.Lstat(dstFs.root)
	if !os.IsNotExist(err) {
		return fs.ErrorDirExists
	}

	// Do the move
	return os.Rename(srcFs.root, dstFs.root)
}

// ------------------------------------------------------------

// Return the parent Fs

@@ -426,10 +552,15 @@ func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
	return
}

// mkdirAll makes all the directories needed to store the object
func (o *FsObjectLocal) mkdirAll() error {
	dir, _ := getDirFile(o.path)
	return os.MkdirAll(dir, 0777)
}

// Update the object from in with modTime and size
func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) error {
	dir := path.Dir(o.path)
	err := os.MkdirAll(dir, 0777)
	err := o.mkdirAll()
	if err != nil {
		return err
	}

@@ -474,7 +605,66 @@ func (o *FsObjectLocal) Remove() error {
	return os.Remove(o.path)
}

// Return the current directory and file from a path
// Assumes os.PathSeparator is used.
func getDirFile(s string) (string, string) {
	i := strings.LastIndex(s, string(os.PathSeparator))
	return s[:i], s[i+1:]
}

func filterPath(s string) string {
	s = filepath.Clean(s)
	if runtime.GOOS == "windows" {
		s = strings.Replace(s, `/`, `\`, -1)

		if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
			s2, err := filepath.Abs(s)
			if err == nil {
				s = s2
			}
		}

		// Convert to UNC
		return uncPath(s)
	}

	if !filepath.IsAbs(s) {
		s2, err := filepath.Abs(s)
		if err == nil {
			s = s2
		}
	}

	return s
}

// Pattern to match a windows absolute path: "c:\temp" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)

// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(s string) string {
	// UNC can NOT use "/", so convert all to "\"
	s = strings.Replace(s, `/`, `\`, -1)

	// If prefix is "\\", we already have a UNC path or server.
	if strings.HasPrefix(s, `\\`) {
		// If already long path, just keep it
		if strings.HasPrefix(s, `\\?\`) {
			return s
		}
		// Trim "\\" from path and add UNC prefix.
		return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
	}
	if isAbsWinDrive.Match([]byte(s)) {
		return `\\?\` + s
	}
	return s
}

// Check the interfaces are satisfied
var _ fs.Fs = &FsLocal{}
var _ fs.Purger = &FsLocal{}
var _ fs.Mover = &FsLocal{}
var _ fs.DirMover = &FsLocal{}
var _ fs.Object = &FsObjectLocal{}
@@ -1,7 +1,7 @@
// Test Local filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
// Regenerate with: make gen_tests
package local_test

import (

@@ -34,6 +34,9 @@ func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
local/tests_test.go (new file, 52 lines)
@@ -0,0 +1,52 @@
package local

import (
	"testing"
)

var uncTestPaths = []string{
	"C:\\Ba*d\\P|a?t<h>\\Windows\\Folder",
	"C:/Ba*d/P|a?t<h>/Windows\\Folder",
	"C:\\Windows\\Folder",
	"\\\\?\\C:\\Windows\\Folder",
	"//?/C:/Windows/Folder",
	"\\\\?\\UNC\\server\\share\\Desktop",
"\\\\?\\unC\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\AbsoluteToRoot\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop",
|
||||
"\\\\?\\UNC\\\\share\\folder\\Desktop",
|
||||
"\\\\server\\share",
|
||||
}
|
||||
|
||||
var uncTestPathsResults = []string{
|
||||
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
|
||||
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
|
||||
`\\?\C:\Windows\Folder`,
|
||||
`\\?\C:\Windows\Folder`,
|
||||
`\\?\C:\Windows\Folder`,
|
||||
`\\?\UNC\server\share\Desktop`,
|
||||
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
	`\\?\UNC\server\share\Desktop`,
	`\\?\UNC\\share\folder\Desktop`,
	`\\?\UNC\server\share`,
}

// Test that UNC paths are converted.
func TestUncPaths(t *testing.T) {
	for i, p := range uncTestPaths {
		unc := uncPath(p)
		if unc != uncTestPathsResults[i] {
			t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
		}
		// Test we don't add more.
		unc = uncPath(unc)
		if unc != uncTestPathsResults[i] {
			t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
		}
	}
}
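The conversion the table above pins down can be tried standalone. The sketch below inlines the uncPath logic from local.go (with the drive-letter pattern anchored at the start of the string); the main wrapper and its sample paths are illustrative only:

	package main

	import (
		"fmt"
		"regexp"
		"strings"
	)

	var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)

	// uncPath as in local.go above, inlined so the example runs standalone.
	func uncPath(s string) string {
		// UNC cannot use "/", so convert all to "\"
		s = strings.Replace(s, `/`, `\`, -1)
		if strings.HasPrefix(s, `\\`) {
			if strings.HasPrefix(s, `\\?\`) {
				return s // already a long path
			}
			// A \\server\share path gets the \\?\UNC\ prefix
			return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
		}
		if isAbsWinDrive.MatchString(s) {
			// An absolute drive path gets the plain long-path prefix
			return `\\?\` + s
		}
		return s
	}

	func main() {
		fmt.Println(uncPath(`C:\Windows\Folder`))      // \\?\C:\Windows\Folder
		fmt.Println(uncPath(`\\server\share\Desktop`)) // \\?\UNC\server\share\Desktop
		fmt.Println(uncPath(`//?/C:/Windows/Folder`))  // \\?\C:\Windows\Folder (unchanged form)
	}

Feeding a converted path back in is a no-op, which is exactly what the second uncPath call in TestUncPaths checks.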
@@ -16,11 +16,13 @@ docs = [
    "about.md",
    "install.md",
    "docs.md",
    "overview.md",
    "drive.md",
    "s3.md",
    "swift.md",
    "dropbox.md",
    "googlecloudstorage.md",
    "amazonclouddrive.md",
    "local.md",
    "changelog.md",
    "bugs.md",

@@ -33,6 +35,8 @@ docs = [
# Docs which aren't made into outfile
ignore_docs = [
    "downloads.md",
    "privacy.md",
    "donate.md",
]

def read_doc(doc):
notes.txt (10 lines changed)
@@ -57,6 +57,16 @@ Ideas
  * control times sync (which is slow with some remotes) with -a --archive flag?
  * Copy a glob pattern - could do with LimitedFs

Make an encryption layer.

This would layer over the source FS to
  * decrypt all gets
  * encrypt all puts
  * encrypt file names in list
  * decrypt them in list

Would like to be able to see unencrypted file names in remote though? How? Or is that two encryption layers..?

Bugs
  * Non verbose - not sure number transferred got counted up? CHECK
  * When doing copy it recurses the whole of the destination FS which isn't necessary
oauthutil/oauthutil.go (new file, 296 lines)
@@ -0,0 +1,296 @@
package oauthutil

import (
	"crypto/rand"
	"encoding/json"
	"fmt"
	"log"
	"net"
	"net/http"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/skratchdot/open-golang/open"
	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

const (
	// configKey is the key used to store the token under
	configKey = "token"

	// TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization
	// code should be returned in the title bar of the browser, with the page text
	// prompting the user to copy the code and paste it in the application.
	TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob"

	// bindAddress is the binding for the local webserver when active
	bindAddress = "127.0.0.1:53682"

	// RedirectURL is the redirect to the local webserver when active
	RedirectURL = "http://" + bindAddress + "/"
)

// oldToken contains an end-user's tokens.
// This is the data you must store to persist authentication.
//
// From the original code.google.com/p/goauth2/oauth package - used
// for backwards compatibility in the rclone config file
type oldToken struct {
	AccessToken  string
	RefreshToken string
	Expiry       time.Time
}

// getToken returns the token saved in the config file under
// section name.
func getToken(name string) (*oauth2.Token, error) {
	tokenString, err := fs.ConfigFile.GetValue(string(name), configKey)
	if err != nil {
		return nil, err
	}
	if tokenString == "" {
		return nil, fmt.Errorf("Empty token found - please run rclone config again")
	}
	token := new(oauth2.Token)
	err = json.Unmarshal([]byte(tokenString), token)
	if err != nil {
		return nil, err
	}
	// if it has data then return it
	if token.AccessToken != "" && token.RefreshToken != "" {
		return token, nil
	}
	// otherwise try parsing as oldToken
	oldtoken := new(oldToken)
	err = json.Unmarshal([]byte(tokenString), oldtoken)
	if err != nil {
		return nil, err
	}
	// Fill in result into new token
	token.AccessToken = oldtoken.AccessToken
	token.RefreshToken = oldtoken.RefreshToken
	token.Expiry = oldtoken.Expiry
	// Save new format in config file
	err = putToken(name, token)
	if err != nil {
		return nil, err
	}
	return token, nil
}

// putToken stores the token in the config file
//
// This saves the config file if it changes
func putToken(name string, token *oauth2.Token) error {
	tokenBytes, err := json.Marshal(token)
	if err != nil {
		return err
	}
	tokenString := string(tokenBytes)
	old := fs.ConfigFile.MustValue(name, configKey)
	if tokenString != old {
		fs.ConfigFile.SetValue(name, configKey, tokenString)
		fs.SaveConfig()
		fs.Debug(name, "Saving new token in config file")
	}
	return nil
}

// tokenSource stores updated tokens in the config file
type tokenSource struct {
	Name        string
	TokenSource oauth2.TokenSource
	OldToken    oauth2.Token
}

// Token returns a token or an error.
// Token must be safe for concurrent use by multiple goroutines.
// The returned Token must not be modified.
//
// This saves the token in the config file if it has changed
func (ts *tokenSource) Token() (*oauth2.Token, error) {
	token, err := ts.TokenSource.Token()
	if err != nil {
		return nil, err
	}
	if *token != ts.OldToken {
		putToken(ts.Name, token)
	}
	return token, nil
}

// Check interface satisfied
var _ oauth2.TokenSource = (*tokenSource)(nil)

// Context returns a context with our HTTP Client baked in for oauth2
func Context() context.Context {
	return context.WithValue(nil, oauth2.HTTPClient, fs.Config.Client())
}

// NewClient gets a token from the config file and configures a Client
// with it
func NewClient(name string, config *oauth2.Config) (*http.Client, error) {
	token, err := getToken(name)
	if err != nil {
		return nil, err
	}

	// Set our own http client in the context
	ctx := Context()

	// Wrap the TokenSource in our TokenSource which saves changed
	// tokens in the config file
	ts := &tokenSource{
		Name:        name,
		OldToken:    *token,
		TokenSource: config.TokenSource(ctx, token),
	}
	return oauth2.NewClient(ctx, ts), nil
}

// Config does the initial creation of the token
//
// It may run an internal webserver to receive the results
func Config(name string, config *oauth2.Config) error {
	// See if already have a token
	tokenString := fs.ConfigFile.MustValue(name, "token")
	if tokenString != "" {
		fmt.Printf("Already have a token - refresh?\n")
		if !fs.Confirm() {
			return nil
		}
	}

	// Detect whether we should use internal web server
	useWebServer := false
	switch config.RedirectURL {
	case RedirectURL:
		useWebServer = true
	case TitleBarRedirectURL:
		fmt.Printf("Use auto config?\n")
		fmt.Printf(" * Say Y if not sure\n")
		fmt.Printf(" * Say N if you are working on a remote or headless machine\n")
		useWebServer = fs.Confirm()
		// copy the config and set to use the internal webserver
		configCopy := *config
		config = &configCopy
		config.RedirectURL = RedirectURL
	}

	// Make random state
	stateBytes := make([]byte, 16)
	_, err := rand.Read(stateBytes)
	if err != nil {
		return err
	}
	state := fmt.Sprintf("%x", stateBytes)
	authUrl := config.AuthCodeURL(state)

	// Prepare webserver
	server := authServer{
		state:       state,
		bindAddress: bindAddress,
		authUrl:     authUrl,
	}
	if useWebServer {
		server.code = make(chan string, 1)
		go server.Start()
		defer server.Stop()
		authUrl = "http://" + bindAddress + "/auth"
	}

	// Generate a URL for the user to visit for authorization.
	_ = open.Start(authUrl)
	fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authUrl)
	fmt.Printf("Log in and authorize rclone for access\n")

	var authCode string
	if useWebServer {
		// Read the code, and exchange it for a token.
		fmt.Printf("Waiting for code...\n")
		authCode = <-server.code
		if authCode != "" {
			fmt.Printf("Got code\n")
		} else {
			return fmt.Errorf("Failed to get code")
		}
	} else {
		// Read the code, and exchange it for a token.
		fmt.Printf("Enter verification code> ")
		authCode = fs.ReadLine()
	}
	token, err := config.Exchange(oauth2.NoContext, authCode)
	if err != nil {
		return fmt.Errorf("Failed to get token: %v", err)
	}
	return putToken(name, token)
}

// Local web server for collecting auth
type authServer struct {
	state       string
	listener    net.Listener
	bindAddress string
	code        chan string
	authUrl     string
}

// Start runs an internal web server to receive config details
func (s *authServer) Start() {
	fs.Debug(nil, "Starting auth server on %s", s.bindAddress)
	mux := http.NewServeMux()
	server := &http.Server{
		Addr:    s.bindAddress,
		Handler: mux,
	}
	mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, req *http.Request) {
		http.Error(w, "", 404)
		return
	})
	mux.HandleFunc("/auth", func(w http.ResponseWriter, req *http.Request) {
		http.Redirect(w, req, s.authUrl, 307)
		return
	})
	mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
		fs.Debug(nil, "Received request on auth server")
		code := req.FormValue("code")
		if code != "" {
			state := req.FormValue("state")
			if state != s.state {
				fs.Debug(nil, "State did not match: want %q got %q", s.state, state)
				fmt.Fprintf(w, "<h1>Failure</h1>\n<p>Auth state doesn't match</p>")
			} else {
				fs.Debug(nil, "Successfully got code")
				if s.code != nil {
					fmt.Fprintf(w, "<h1>Success</h1>\n<p>Go back to rclone to continue</p>")
					s.code <- code
				} else {
					fmt.Fprintf(w, "<h1>Success</h1>\n<p>Cut and paste this code into rclone: <code>%s</code></p>", code)
				}
			}
			return
		}
		fs.Debug(nil, "No code found on request")
		fmt.Fprintf(w, "<h1>Failed!</h1>\nNo code found.")
		http.Error(w, "", 500)
	})

	var err error
	s.listener, err = net.Listen("tcp", s.bindAddress)
	if err != nil {
		log.Fatalf("Failed to start auth webserver: %v", err)
	}
	server.Serve(s.listener)
	fs.Debug(nil, "Closed auth server")
}

func (s *authServer) Stop() {
	fs.Debug(nil, "Closing auth server")
	if s.code != nil {
		close(s.code)
		s.code = nil
	}
	_ = s.listener.Close()
}
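To see how these pieces fit together: a backend declares an oauth2.Config, Config is run once from rclone config to obtain and persist the initial token, and NewClient is used at run time, with the tokenSource wrapper re-saving any refreshed token behind the scenes. A hedged sketch of that wiring - the client ID, secret and endpoint URLs below are placeholders, not a real provider:

	package main

	import (
		"log"

		"github.com/ncw/rclone/oauthutil"
		"golang.org/x/oauth2"
	)

	// exampleConfig stands in for a backend's oauth2.Config - a real
	// backend fills in its provider's credentials and endpoints.
	var exampleConfig = &oauth2.Config{
		ClientID:     "xxx",                          // placeholder
		ClientSecret: "yyy",                          // placeholder
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://example.com/auth",  // placeholder
			TokenURL: "https://example.com/token", // placeholder
		},
		RedirectURL: oauthutil.RedirectURL, // use the local webserver flow
	}

	func main() {
		// At "rclone config" time: obtain a token and save it under
		// the remote's name in the config file.
		if err := oauthutil.Config("myremote", exampleConfig); err != nil {
			log.Fatal(err)
		}
		// At run time: build an http.Client whose transport refreshes
		// and re-saves the token automatically via tokenSource.
		client, err := oauthutil.NewClient("myremote", exampleConfig)
		if err != nil {
			log.Fatal(err)
		}
		_ = client
	}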
pacer/pacer.go (new file, 152 lines)
@@ -0,0 +1,152 @@
// pacer is a utility package to make pacing and retrying API calls easy
package pacer

import (
	"time"

	"github.com/ncw/rclone/fs"
)

type Pacer struct {
	minSleep      time.Duration // minimum sleep time
	maxSleep      time.Duration // maximum sleep time
	decayConstant uint          // decay constant
	pacer         chan struct{} // To pace the operations
	sleepTime     time.Duration // Time to sleep for each transaction
	retries       int           // Max number of retries
}

// Paced is a function which is called by the Call and CallNoRetry
// methods. It should return a boolean, true if it would like to be
// retried, and an error. This error may be returned or returned
// wrapped in a RetryError.
type Paced func() (bool, error)

// New returns a Pacer with sensible defaults
func New() *Pacer {
	p := &Pacer{
		minSleep:      10 * time.Millisecond,
		maxSleep:      2 * time.Second,
		decayConstant: 2,
		retries:       10,
		pacer:         make(chan struct{}, 1),
	}
	p.sleepTime = p.minSleep

	// Put the first pacing token in
	p.pacer <- struct{}{}

	return p
}

// SetMinSleep sets the minimum sleep time for the pacer
func (p *Pacer) SetMinSleep(t time.Duration) *Pacer {
	p.minSleep = t
	p.sleepTime = p.minSleep
	return p
}

// SetMaxSleep sets the maximum sleep time for the pacer
func (p *Pacer) SetMaxSleep(t time.Duration) *Pacer {
	p.maxSleep = t
	p.sleepTime = p.minSleep
	return p
}

// SetDecayConstant sets the decay constant for the pacer
//
// This is the speed the time falls back to the minimum after errors
// have occurred.
//
// bigger for slower decay, exponential
func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
	p.decayConstant = decay
	return p
}

// SetRetries sets the max number of tries for Call
func (p *Pacer) SetRetries(retries int) *Pacer {
	p.retries = retries
	return p
}

// Start a call to the API
//
// This must be called as a pair with endCall
//
// This waits for the pacer token
func (p *Pacer) beginCall() {
	// pacer starts with a token in and whenever we take one out
	// XXX ms later we put another in. We could do this with a
	// Ticker more accurately, but then we'd have to work out how
	// not to run it when it wasn't needed
	<-p.pacer

	// Restart the timer
	go func(t time.Duration) {
		// fs.Debug(f, "New sleep for %v at %v", t, time.Now())
		time.Sleep(t)
		p.pacer <- struct{}{}
	}(p.sleepTime)
}

// End a call to the API
//
// Refresh the pace given the again flag, which says whether the
// operation was rate limited and should be retried.
func (p *Pacer) endCall(again bool) {
	oldSleepTime := p.sleepTime
	if again {
		p.sleepTime *= 2
		if p.sleepTime > p.maxSleep {
			p.sleepTime = p.maxSleep
		}
		if p.sleepTime != oldSleepTime {
			fs.Debug("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
		}
	} else {
		p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
		if p.sleepTime < p.minSleep {
			p.sleepTime = p.minSleep
		}
		if p.sleepTime != oldSleepTime {
			fs.Debug("pacer", "Reducing sleep to %v", p.sleepTime)
		}
	}
}
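The decay expression in endCall computes sleepTime × (2^d − 1) / 2^d with shifts, i.e. a factor of 3/4 per successful call at the default decayConstant of 2, while failures double the sleep up to maxSleep. A standalone sketch of just that arithmetic - the loop and starting values are illustrative only:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		const decayConstant = 2          // the pacer default
		sleepTime := 2 * time.Second     // i.e. pinned at maxSleep after rate limits
		minSleep := 10 * time.Millisecond
		// Each call that doesn't ask for a retry multiplies the sleep by
		// (2^d - 1) / 2^d = 3/4, so it falls back towards minSleep:
		// 2s, 1.5s, 1.125s, 843.75ms, ...
		for i := 0; i < 20 && sleepTime > minSleep; i++ {
			sleepTime = (sleepTime<<decayConstant - sleepTime) >> decayConstant
			fmt.Println(sleepTime)
		}
	}

Callers then wrap each API request in p.Call(func() (bool, error) { ... }), returning true from the closure when the server signalled a rate limit, so the backoff and decay happen transparently.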
// call implements Call but with settable retries
func (p *Pacer) call(fn Paced, retries int) (err error) {
	var again bool
	for i := 0; i < retries; i++ {
		p.beginCall()
		again, err = fn()
		p.endCall(again)
		if !again {
			break
		}
	}
	if again {
		err = fs.RetryError(err)
	}
	return err
}

// Call paces the remote operations to not exceed the limits and retry
// on rate limit exceeded
//
// This calls fn, expecting it to return a retry flag and an
// error. This error may be returned wrapped in a RetryError if the
// number of retries is exceeded.
func (p *Pacer) Call(fn Paced) (err error) {
	return p.call(fn, p.retries)
}

// Pace the remote operations to not exceed Amazon's limits and return
// a retry error on rate limit exceeded
//
// This calls fn and wraps the output in a RetryError if it would like
// it to be retried
func (p *Pacer) CallNoRetry(fn Paced) error {
	return p.call(fn, 1)
}
rclone.go (146 lines changed)
@@ -12,10 +12,11 @@ import (
	"strings"
	"time"

	"github.com/ogier/pflag"
	"github.com/spf13/pflag"

	"github.com/ncw/rclone/fs"
	// Active file systems
	_ "github.com/ncw/rclone/amazonclouddrive"
	_ "github.com/ncw/rclone/drive"
	_ "github.com/ncw/rclone/dropbox"
	_ "github.com/ncw/rclone/googlecloudstorage"

@@ -31,16 +32,18 @@ var (
	statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats (0 to disable)")
	version       = pflag.BoolP("version", "V", false, "Print the version number")
	logFile       = pflag.StringP("log-file", "", "", "Log everything to this file")
	retries       = pflag.IntP("retries", "", 3, "Retry operations this many times if they fail")
)

type Command struct {
	Name     string
	Help     string
	ArgsHelp string
	Run      func(fdst, fsrc fs.Fs)
	Run      func(fdst, fsrc fs.Fs) error
	MinArgs  int
	MaxArgs  int
	NoStats  bool
	Retry    bool
}

// checkArgs checks there are enough arguments and prints a message if not

@@ -64,14 +67,12 @@ var Commands = []Command{
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Sync(fdst, fsrc, false)
			if err != nil {
				log.Fatalf("Failed to copy: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.CopyDir(fdst, fsrc)
		},
		MinArgs: 2,
		MaxArgs: 2,
		Retry:   true,
	},
	{
		Name: "sync",

@@ -82,25 +83,35 @@ var Commands = []Command{
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the --dry-run flag.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Sync(fdst, fsrc, true)
			if err != nil {
				log.Fatalf("Failed to sync: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Sync(fdst, fsrc)
		},
		MinArgs: 2,
		MaxArgs: 2,
		Retry:   true,
	},
	{
		Name:     "move",
		ArgsHelp: "source:path dest:path",
		Help: `
Moves the source to the destination. This is equivalent to a
copy followed by a purge, but may use server side operations
to speed it up. Since this can cause data loss, test first
with the --dry-run flag.`,
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.MoveDir(fdst, fsrc)
		},
		MinArgs: 2,
		MaxArgs: 2,
		Retry:   true,
	},
	{
		Name:     "ls",
		ArgsHelp: "[remote:path]",
		Help: `
List all the objects in the path with size and path.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.List(fdst, os.Stdout)
			if err != nil {
				log.Fatalf("Failed to list: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.List(fdst, os.Stdout)
		},
		MinArgs: 1,
		MaxArgs: 1,

@@ -110,11 +121,8 @@ var Commands = []Command{
		ArgsHelp: "[remote:path]",
		Help: `
List all directories/containers/buckets in the path.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.ListDir(fdst, os.Stdout)
			if err != nil {
				log.Fatalf("Failed to listdir: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.ListDir(fdst, os.Stdout)
		},
		MinArgs: 1,
		MaxArgs: 1,

@@ -125,11 +133,8 @@ var Commands = []Command{
		Help: `
List all the objects in the path with modification time,
size and path.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.ListLong(fdst, os.Stdout)
			if err != nil {
				log.Fatalf("Failed to list long: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.ListLong(fdst, os.Stdout)
		},
		MinArgs: 1,
		MaxArgs: 1,

@@ -140,11 +145,8 @@ var Commands = []Command{
		Help: `
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Md5sum(fdst, os.Stdout)
			if err != nil {
				log.Fatalf("Failed to list: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Md5sum(fdst, os.Stdout)
		},
		MinArgs: 1,
		MaxArgs: 1,

@@ -154,14 +156,12 @@ var Commands = []Command{
		ArgsHelp: "remote:path",
		Help: `
Make the path if it doesn't already exist`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Mkdir(fdst)
			if err != nil {
				log.Fatalf("Failed to mkdir: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Mkdir(fdst)
		},
		MinArgs: 1,
		MaxArgs: 1,
		Retry:   true,
	},
	{
		Name: "rmdir",

@@ -169,28 +169,24 @@ var Commands = []Command{
		Help: `
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Rmdir(fdst)
			if err != nil {
				log.Fatalf("Failed to rmdir: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Rmdir(fdst)
		},
		MinArgs: 1,
		MaxArgs: 1,
		Retry:   true,
	},
	{
		Name:     "purge",
		ArgsHelp: "remote:path",
		Help: `
Remove the path and all of its contents.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Purge(fdst)
			if err != nil {
				log.Fatalf("Failed to purge: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Purge(fdst)
		},
		MinArgs: 1,
		MaxArgs: 1,
		Retry:   true,
	},
	{
		Name: "check",

@@ -199,11 +195,8 @@ var Commands = []Command{
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Check(fdst, fsrc)
			if err != nil {
				log.Fatalf("Failed to check: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Check(fdst, fsrc)
		},
		MinArgs: 2,
		MaxArgs: 2,

@@ -212,8 +205,9 @@ var Commands = []Command{
		Name: "config",
		Help: `
Enter an interactive configuration session.`,
		Run: func(fdst, fsrc fs.Fs) {
		Run: func(fdst, fsrc fs.Fs) error {
			fs.EditConfig()
			return nil
		},
		NoStats: true,
	},

@@ -243,8 +237,8 @@ Subcommands:
	fmt.Fprintf(os.Stderr, "Options:\n")
	pflag.PrintDefaults()
	fmt.Fprintf(os.Stderr, `
It is only necessary to use a unique prefix of the subcommand, eg 'up'
for 'upload'.
It is only necessary to use a unique prefix of the subcommand, eg 'mo'
for 'move'.
`)
}

@@ -289,6 +283,7 @@ func ParseCommand() (*Command, []string) {
	args = args[1:]

	// Find the command doing a prefix match
	var found = make([]*Command, 0, 1)
	var command *Command
	for i := range Commands {
		trialCommand := &Commands[i]

@@ -297,16 +292,24 @@ func ParseCommand() (*Command, []string) {
			command = trialCommand
			break
		} else if strings.HasPrefix(trialCommand.Name, cmd) {
			if command != nil {
				fs.Stats.Error()
				log.Fatalf("Not unique - matches multiple commands %q", cmd)
			}
			command = trialCommand
			found = append(found, trialCommand)
		}
	}
	if command == nil {
		fs.Stats.Error()
		log.Fatalf("Unknown command %q", cmd)
	switch len(found) {
	case 0:
		fs.Stats.Error()
		log.Fatalf("Unknown command %q", cmd)
	case 1:
		command = found[0]
	default:
		fs.Stats.Error()
		var names []string
		for _, cmd := range found {
			names = append(names, `"`+cmd.Name+`"`)
		}
		log.Fatalf("Not unique - matches multiple commands: %s", strings.Join(names, ", "))
	}
	}
	if command.Run == nil {
		syntaxError()

@@ -376,7 +379,24 @@ func main() {

	// Run the actual command
	if command.Run != nil {
		command.Run(fdst, fsrc)
		var err error
		for try := 1; try <= *retries; try++ {
			err = command.Run(fdst, fsrc)
			if !command.Retry || (err == nil && !fs.Stats.Errored()) {
				break
			}
			if err != nil {
				fs.Log(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
			} else {
				fs.Log(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
			}
			if try < *retries {
				fs.Stats.ResetErrors()
			}
		}
		if err != nil {
			log.Fatalf("Failed to %s: %v", command.Name, err)
		}
		if !command.NoStats && (!fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0) {
			fmt.Fprintln(os.Stderr, fs.Stats)
		}
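The loop above counts an attempt as failed if Run returned an error or if the stats recorded any errors, and resets the error count between attempts so each retry is judged afresh. A minimal standalone sketch of the same shape, with stand-ins for command.Run and fs.Stats:

	package main

	import (
		"errors"
		"fmt"
	)

	func main() {
		const retries = 3
		errCount := 0 // stand-in for the fs.Stats error counter
		attempt := func(try int) error { // stand-in for command.Run
			if try < 3 {
				errCount++
				return errors.New("transient failure")
			}
			return nil
		}
		var err error
		for try := 1; try <= retries; try++ {
			err = attempt(try)
			if err == nil && errCount == 0 {
				break
			}
			fmt.Printf("Attempt %d/%d failed with %d errors\n", try, retries, errCount)
			if try < retries {
				errCount = 0 // reset so the next attempt is judged on its own
			}
		}
		if err != nil {
			fmt.Println("giving up:", err)
		}
	}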
s3/s3.go (56 lines changed)
@@ -24,8 +24,9 @@ import (

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/service"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/ncw/rclone/fs"

@@ -128,6 +129,7 @@ const (

// FsS3 represents a remote s3 server
type FsS3 struct {
	name   string // the name of the remote
	c      *s3.S3 // the connection to the s3 server
	bucket string // the bucket we are working on
	perm   string // permissions for new buckets / objects

@@ -151,6 +153,19 @@ type FsObjectS3 struct {

// ------------------------------------------------------------

// The name of the remote (as passed into NewFs)
func (f *FsS3) Name() string {
	return f.name
}

// The root of the remote (as passed into NewFs)
func (f *FsS3) Root() string {
	if f.root == "" {
		return f.bucket
	}
	return f.bucket + "/" + f.root
}

// String converts this FsS3 to a string
func (f *FsS3) String() string {
	if f.root == "" {

@@ -206,7 +221,7 @@ func s3Connection(name string) (*s3.S3, error) {
	c := s3.New(awsConfig)
	if region == "other-v2-signature" {
		fs.Debug(name, "Using v2 auth")
		signer := func(req *service.Request) {
		signer := func(req *request.Request) {
			// Ignore AnonymousCredentials object
			if req.Service.Config.Credentials == credentials.AnonymousCredentials {
				return

@@ -214,11 +229,11 @@ func s3Connection(name string) (*s3.S3, error) {
			sign(accessKeyId, secretAccessKey, req.HTTPRequest)
		}
		c.Handlers.Sign.Clear()
		c.Handlers.Sign.PushBack(service.BuildContentLength)
		c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
		c.Handlers.Sign.PushBack(signer)
	}
	// Add user agent
	c.Handlers.Build.PushBack(func(r *service.Request) {
	c.Handlers.Build.PushBack(func(r *request.Request) {
		r.HTTPRequest.Header.Set("User-Agent", fs.UserAgent)
	})
	return c, nil

@@ -235,6 +250,7 @@ func NewFs(name, root string) (fs.Fs, error) {
		return nil, err
	}
	f := &FsS3{
		name:   name,
		c:      c,
		bucket: bucket,
		// FIXME perm: s3.Private, // FIXME need user to specify

@@ -475,6 +491,37 @@ func (f *FsS3) Precision() time.Duration {
	return time.Nanosecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsS3) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*FsObjectS3)
	if !ok {
		fs.Debug(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcFs := srcObj.s3
	key := f.root + remote
	source := srcFs.bucket + "/" + srcFs.root + srcObj.remote
	req := s3.CopyObjectInput{
		Bucket:            &f.bucket,
		Key:               &key,
		CopySource:        &source,
		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
	}
	_, err := f.c.CopyObject(&req)
	if err != nil {
		return nil, err
	}
	return f.NewFsObject(remote), err
}

// ------------------------------------------------------------

// Return the parent Fs

@@ -672,4 +719,5 @@ func (o *FsObjectS3) Remove() error {

// Check the interfaces are satisfied
var _ fs.Fs = &FsS3{}
var _ fs.Copier = &FsS3{}
var _ fs.Object = &FsObjectS3{}
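The trailing var _ declarations are compile-time assertions: adding var _ fs.Copier = &FsS3{} is how a backend declares that it now implements the optional Copier interface, and the build fails if the method set ever drifts. A generic sketch of the idiom - the names below are illustrative, not the rclone types:

	package main

	// Copier stands in for an optional capability interface like fs.Copier.
	type Copier interface {
		Copy(src, remote string) error
	}

	type remoteFs struct{}

	// Copy gives *remoteFs the Copier capability.
	func (f *remoteFs) Copy(src, remote string) error { return nil }

	// Compile-time check: costs nothing at run time, but the build
	// breaks the moment *remoteFs stops satisfying Copier.
	var _ Copier = (*remoteFs)(nil)

	func main() {}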
@@ -1,7 +1,7 @@
// Test S3 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
// Regenerate with: make gen_tests
package s3_test

import (

@@ -34,6 +34,9 @@ func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
@@ -58,6 +58,7 @@ func init() {

// FsSwift represents a remote swift server
type FsSwift struct {
	name      string           // name of this remote
	c         swift.Connection // the connection to the swift server
	container string           // the container we are working on
	root      string           // the path we are working on if any

@@ -75,6 +76,19 @@ type FsObjectSwift struct {

// ------------------------------------------------------------

// The name of the remote (as passed into NewFs)
func (f *FsSwift) Name() string {
	return f.name
}

// The root of the remote (as passed into NewFs)
func (f *FsSwift) Root() string {
	if f.root == "" {
		return f.container
	}
	return f.container + "/" + f.root
}

// String converts this FsSwift to a string
func (f *FsSwift) String() string {
	if f.root == "" {

@@ -141,6 +155,7 @@ func NewFs(name, root string) (fs.Fs, error) {
		return nil, err
	}
	f := &FsSwift{
		name:      name,
		c:         *c,
		container: container,
		root:      directory,

@@ -321,6 +336,29 @@ func (fs *FsSwift) Precision() time.Duration {
	return time.Nanosecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsSwift) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*FsObjectSwift)
	if !ok {
		fs.Debug(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcFs := srcObj.swift
	_, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
	if err != nil {
		return nil, err
	}
	return f.NewFsObject(remote), nil
}

// ------------------------------------------------------------

// Return the parent Fs

@@ -439,4 +477,5 @@ func (o *FsObjectSwift) Remove() error {

// Check the interfaces are satisfied
var _ fs.Fs = &FsSwift{}
var _ fs.Copier = &FsSwift{}
var _ fs.Object = &FsObjectSwift{}
@@ -1,7 +1,7 @@
// Test Swift filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
// Regenerate with: make gen_tests
package swift_test

import (

@@ -34,6 +34,9 @@ func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }