Mirror of https://github.com/rclone/rclone.git, synced 2026-01-12 05:23:55 +00:00
Compare commits
47 Commits
| SHA1 |
|---|
| 4276abc58b |
| a795d93bc3 |
| 5df04cb763 |
| ef54167a4a |
| d42cb11b84 |
| b257de4aba |
| 365b4babae |
| 6d48dffa2f |
| 8f2999b6af |
| be6115fbfa |
| 2fcb8f5db7 |
| 0ab3f020ab |
| 64c23c2f5b |
| ff16e0f6df |
| 1a82ba196b |
| ed72c678f8 |
| 4ed8836a71 |
| 5529978fa7 |
| 66d84c9914 |
| b85ddc4e4f |
| e4a9e27a55 |
| 22645eea2e |
| 345c98ed62 |
| b872ff0237 |
| 1b95718460 |
| 6a3580c556 |
| 16c9fba5de |
| 4e952af614 |
| 6344c3051c |
| ab9f521cbd |
| 3a900e5bb7 |
| b4d7741611 |
| 95fd79faf9 |
| b79dc01016 |
| bf562d7373 |
| 2e9f2ea3d3 |
| 177dbbc29a |
| 4712043e26 |
| 852acd5e4e |
| 9f1daabb2c |
| 938dd24cc9 |
| 57aad81b68 |
| a91bcaaeb0 |
| d04c21b198 |
| 4a0a42c2f1 |
| cc7b9af50e |
| 68fef49c55 |
.travis.yml (14 changes)
@@ -1,12 +1,24 @@
language: go
sudo: false

os:
- linux
- osx

go:
- 1.3.3
- 1.4.2
- 1.5
- tip

install:
- go get ./...
- go get -u github.com/golang/lint/golint
- go get -u golang.org/x/tools/cmd/goimports

script:
- go get ./...
- go vet ./...
- diff <(goimports -d .) <(printf "")
- diff <(golint ./...) <(printf "")
- go test -v ./...
- go test -cpu=2 -race -v ./...
Makefile (2 changes)
@@ -1,3 +1,4 @@
+SHELL = /bin/bash
 TAG := $(shell git describe --tags)
 LAST_TAG := $(shell git describe --tags --abbrev=0)
 NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
@@ -14,6 +15,7 @@ check: rclone
 	go vet ./...
 	errcheck ./...
 	golint ./...
+	diff <(goimports -d .) <(printf "")

 doc: rclone.1 MANUAL.html MANUAL.txt
README.md

@@ -6,7 +6,8 @@
 [Installation](http://rclone.org/install/) |
 [G+](https://google.com/+RcloneOrg)

-[](https://travis-ci.org/ncw/rclone) [](https://godoc.org/github.com/ncw/rclone)
+[](https://travis-ci.org/ncw/rclone) [](https://ci.appveyor.com/project/ncw/rclone) [](https://godoc.org/github.com/ncw/rclone)

 Rclone is a command line program to sync files and directories to and from
amazonclouddrive/amazonclouddrive.go

@@ -16,9 +16,7 @@ import (
 	"fmt"
 	"io"
 	"log"
-	"net"
 	"net/http"
-	"net/url"
 	"regexp"
 	"strings"
 	"sync"
@@ -71,16 +69,16 @@ func init() {
 		},
 		Options: []fs.Option{{
 			Name: oauthutil.ConfigClientID,
-			Help: "Amazon Application Client Id - leave blank to use rclone's.",
+			Help: "Amazon Application Client Id - leave blank normally.",
 		}, {
 			Name: oauthutil.ConfigClientSecret,
-			Help: "Amazon Application Client Secret - leave blank to use rclone's.",
+			Help: "Amazon Application Client Secret - leave blank normally.",
 		}},
 	})
 }

-// FsAcd represents a remote acd server
-type FsAcd struct {
+// Fs represents a remote acd server
+type Fs struct {
 	name string      // name of this remote
 	c    *acd.Client // the connection to the acd server
 	root string      // the path we are working on
@@ -88,11 +86,11 @@ type FsAcd struct {
 	pacer *pacer.Pacer // pacer for API calls
 }

-// FsObjectAcd describes a acd object
+// Object describes a acd object
 //
 // Will definitely have info but maybe not meta
-type FsObjectAcd struct {
-	acd    *FsAcd    // what this object is part of
+type Object struct {
+	fs     *Fs       // what this object is part of
 	remote string    // The remote path
 	info   *acd.Node // Info from the acd object if known
 }
@@ -100,17 +98,17 @@ type FsObjectAcd struct {
 // ------------------------------------------------------------

 // Name of the remote (as passed into NewFs)
-func (f *FsAcd) Name() string {
+func (f *Fs) Name() string {
 	return f.name
 }

 // Root of the remote (as passed into NewFs)
-func (f *FsAcd) Root() string {
+func (f *Fs) Root() string {
 	return f.root
 }

-// String converts this FsAcd to a string
-func (f *FsAcd) String() string {
+// String converts this Fs to a string
+func (f *Fs) String() string {
 	return fmt.Sprintf("Amazon cloud drive root '%s'", f.root)
 }
@@ -134,31 +132,10 @@ var retryErrorCodes = []int{
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
 func shouldRetry(resp *http.Response, err error) (bool, error) {
-	// See if HTTP error code is to be retried
-	if err != nil && resp != nil {
-		for _, e := range retryErrorCodes {
-			if resp.StatusCode == e {
-				return true, err
-			}
-		}
-	}
-
-	// Allow retry if request times out. Adapted from
-	// http://stackoverflow.com/questions/23494950/specifically-check-for-timeout-error
-	switch err := err.(type) {
-	case *url.Error:
-		if err, ok := err.Err.(net.Error); ok && err.Timeout() {
-			return true, err
-		}
-	case net.Error:
-		if err.Timeout() {
-			return true, err
-		}
-	}
-	return false, err
+	return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
 }

-// NewFs constructs an FsAcd from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string) (fs.Fs, error) {
 	root = parsePath(root)
 	oAuthClient, err := oauthutil.NewClient(name, acdConfig)
@@ -168,7 +145,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	c := acd.NewClient(oAuthClient)
 	c.UserAgent = fs.UserAgent
-	f := &FsAcd{
+	f := &Fs{
 		name: name,
 		root: root,
 		c:    c,
@@ -225,9 +202,9 @@ func NewFs(name, root string) (fs.Fs, error) {
 // Return an FsObject from a path
 //
 // May return nil if an error occurred
-func (f *FsAcd) newFsObjectWithInfo(remote string, info *acd.Node) fs.Object {
-	o := &FsObjectAcd{
-		acd:    f,
+func (f *Fs) newFsObjectWithInfo(remote string, info *acd.Node) fs.Object {
+	o := &Object{
+		fs:     f,
 		remote: remote,
 	}
 	if info != nil {
@@ -246,12 +223,12 @@ func (f *FsAcd) newFsObjectWithInfo(remote string, info *acd.Node) fs.Object {
 // NewFsObject returns an FsObject from a path
 //
 // May return nil if an error occurred
-func (f *FsAcd) NewFsObject(remote string) fs.Object {
+func (f *Fs) NewFsObject(remote string) fs.Object {
 	return f.newFsObjectWithInfo(remote, nil)
 }

 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *FsAcd) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
 	//fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
 	folder := acd.FolderFromId(pathID, f.c.Nodes)
 	var resp *http.Response
@@ -278,7 +255,7 @@ func (f *FsAcd) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err
 }

 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *FsAcd) CreateDir(pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
 	//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
 	folder := acd.FolderFromId(pathID, f.c.Nodes)
 	var resp *http.Response
@@ -306,7 +283,7 @@ type listAllFn func(*acd.Node) bool
 // Lists the directory required calling the user function on each item found
 //
 // If the user fn ever returns true then it early exits with found = true
-func (f *FsAcd) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
 	query := "parents:" + dirID
 	if directoriesOnly {
 		query += " AND kind:" + folderKind
@@ -359,7 +336,7 @@ OUTER:
 //
 // This fetches the minimum amount of stuff but does more API calls
 // which makes it slow
-func (f *FsAcd) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
+func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
 	var subError error
 	// Make the API request
 	var wg sync.WaitGroup
@@ -400,7 +377,7 @@ func (f *FsAcd) listDirRecursive(dirID string, path string, out fs.ObjectsChan)
 }

 // List walks the path returning a channel of FsObjects
-func (f *FsAcd) List() fs.ObjectsChan {
+func (f *Fs) List() fs.ObjectsChan {
 	out := make(fs.ObjectsChan, fs.Config.Checkers)
 	go func() {
 		defer close(out)
@@ -420,7 +397,7 @@ func (f *FsAcd) List() fs.ObjectsChan {
 }

 // ListDir lists the directories
-func (f *FsAcd) ListDir() fs.DirChan {
+func (f *Fs) ListDir() fs.DirChan {
 	out := make(fs.DirChan, fs.Config.Checkers)
 	go func() {
 		defer close(out)
@@ -453,17 +430,17 @@ func (f *FsAcd) ListDir() fs.DirChan {
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *FsAcd) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
-	// Temporary FsObject under construction
-	o := &FsObjectAcd{
-		acd:    f,
+func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
+	// Temporary Object under construction
+	o := &Object{
+		fs:     f,
 		remote: remote,
 	}
 	leaf, directoryID, err := f.dirCache.FindPath(remote, true)
 	if err != nil {
 		return nil, err
 	}
-	folder := acd.FolderFromId(directoryID, o.acd.c.Nodes)
+	folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
 	var info *acd.File
 	var resp *http.Response
 	err = f.pacer.CallNoRetry(func() (bool, error) {
@@ -482,13 +459,13 @@ func (f *FsAcd) Put(in io.Reader, remote string, modTime time.Time, size int64)
 }

 // Mkdir creates the container if it doesn't exist
-func (f *FsAcd) Mkdir() error {
+func (f *Fs) Mkdir() error {
 	return f.dirCache.FindRoot(true)
 }

 // purgeCheck remotes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *FsAcd) purgeCheck(check bool) error {
+func (f *Fs) purgeCheck(check bool) error {
 	if f.root == "" {
 		return fmt.Errorf("Can't purge root directory")
 	}
@@ -502,7 +479,7 @@ func (f *FsAcd) purgeCheck(check bool) error {
 	if check {
 		// check directory is empty
 		empty := true
-		_, err := f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
+		_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
 			switch *node.Kind {
 			case folderKind:
 				empty = false
@@ -543,12 +520,12 @@ func (f *FsAcd) purgeCheck(check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *FsAcd) Rmdir() error {
+func (f *Fs) Rmdir() error {
 	return f.purgeCheck(true)
 }

 // Precision return the precision of this Fs
-func (f *FsAcd) Precision() time.Duration {
+func (f *Fs) Precision() time.Duration {
 	return fs.ModTimeNotSupported
 }
@@ -561,13 +538,13 @@ func (f *FsAcd) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-//func (f *FsAcd) Copy(src fs.Object, remote string) (fs.Object, error) {
-//	srcObj, ok := src.(*FsObjectAcd)
+//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+//	srcObj, ok := src.(*Object)
 //	if !ok {
 //		fs.Debug(src, "Can't copy - not same remote type")
 //		return nil, fs.ErrorCantCopy
 //	}
-//	srcFs := srcObj.acd
+//	srcFs := srcObj.fs
 //	_, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
 //	if err != nil {
 //		return nil, err
@@ -580,19 +557,19 @@ func (f *FsAcd) Precision() time.Duration {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *FsAcd) Purge() error {
+func (f *Fs) Purge() error {
 	return f.purgeCheck(false)
 }

 // ------------------------------------------------------------

 // Fs returns the parent Fs
-func (o *FsObjectAcd) Fs() fs.Fs {
-	return o.acd
+func (o *Object) Fs() fs.Fs {
+	return o.fs
 }

 // Return a string version
-func (o *FsObjectAcd) String() string {
+func (o *Object) String() string {
 	if o == nil {
 		return "<nil>"
 	}
@@ -600,12 +577,12 @@ func (o *FsObjectAcd) String() string {
 }

 // Remote returns the remote path
-func (o *FsObjectAcd) Remote() string {
+func (o *Object) Remote() string {
 	return o.remote
 }

 // Md5sum returns the Md5sum of an object returning a lowercase hex string
-func (o *FsObjectAcd) Md5sum() (string, error) {
+func (o *Object) Md5sum() (string, error) {
 	if o.info.ContentProperties.Md5 != nil {
 		return *o.info.ContentProperties.Md5, nil
 	}
@@ -613,25 +590,25 @@ func (o *FsObjectAcd) Md5sum() (string, error) {
 }

 // Size returns the size of an object in bytes
-func (o *FsObjectAcd) Size() int64 {
+func (o *Object) Size() int64 {
 	return int64(*o.info.ContentProperties.Size)
 }

 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
-func (o *FsObjectAcd) readMetaData() (err error) {
+func (o *Object) readMetaData() (err error) {
 	if o.info != nil {
 		return nil
 	}
-	leaf, directoryID, err := o.acd.dirCache.FindPath(o.remote, false)
+	leaf, directoryID, err := o.fs.dirCache.FindPath(o.remote, false)
 	if err != nil {
 		return err
 	}
-	folder := acd.FolderFromId(directoryID, o.acd.c.Nodes)
+	folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
 	var resp *http.Response
 	var info *acd.File
-	err = o.acd.pacer.Call(func() (bool, error) {
+	err = o.fs.pacer.Call(func() (bool, error) {
 		info, resp, err = folder.GetFile(leaf)
 		return shouldRetry(resp, err)
 	})
@@ -648,7 +625,7 @@ func (o *FsObjectAcd) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *FsObjectAcd) ModTime() time.Time {
+func (o *Object) ModTime() time.Time {
 	err := o.readMetaData()
 	if err != nil {
 		fs.Log(o, "Failed to read metadata: %s", err)
@@ -663,21 +640,21 @@ func (o *FsObjectAcd) ModTime() time.Time {
 }

 // SetModTime sets the modification time of the local fs object
-func (o *FsObjectAcd) SetModTime(modTime time.Time) {
+func (o *Object) SetModTime(modTime time.Time) {
 	// FIXME not implemented
 	return
 }

 // Storable returns a boolean showing whether this object storable
-func (o *FsObjectAcd) Storable() bool {
+func (o *Object) Storable() bool {
 	return true
 }

 // Open an object for read
-func (o *FsObjectAcd) Open() (in io.ReadCloser, err error) {
+func (o *Object) Open() (in io.ReadCloser, err error) {
 	file := acd.File{Node: o.info}
 	var resp *http.Response
-	err = o.acd.pacer.Call(func() (bool, error) {
+	err = o.fs.pacer.Call(func() (bool, error) {
 		in, resp, err = file.Open()
 		return shouldRetry(resp, err)
 	})
@@ -687,12 +664,12 @@ func (o *FsObjectAcd) Open() (in io.ReadCloser, err error) {
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *FsObjectAcd) Update(in io.Reader, modTime time.Time, size int64) error {
+func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
 	file := acd.File{Node: o.info}
 	var info *acd.File
 	var resp *http.Response
 	var err error
-	err = o.acd.pacer.CallNoRetry(func() (bool, error) {
+	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 		if size != 0 {
 			info, resp, err = file.OverwriteSized(in, size)
 		} else {
@@ -708,10 +685,10 @@ func (o *FsObjectAcd) Update(in io.Reader, modTime time.Time, size int64) error
 }

 // Remove an object
-func (o *FsObjectAcd) Remove() error {
+func (o *Object) Remove() error {
 	var resp *http.Response
 	var err error
-	err = o.acd.pacer.Call(func() (bool, error) {
+	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.info.Trash()
 		return shouldRetry(resp, err)
 	})
@@ -720,10 +697,10 @@ func (o *FsObjectAcd) Remove() error {
 // Check the interfaces are satisfied
 var (
-	_ fs.Fs     = (*FsAcd)(nil)
-	_ fs.Purger = (*FsAcd)(nil)
-	// _ fs.Copier = (*FsAcd)(nil)
-	// _ fs.Mover = (*FsAcd)(nil)
-	// _ fs.DirMover = (*FsAcd)(nil)
-	_ fs.Object = (*FsObjectAcd)(nil)
+	_ fs.Fs     = (*Fs)(nil)
+	_ fs.Purger = (*Fs)(nil)
+	// _ fs.Copier = (*Fs)(nil)
+	// _ fs.Mover = (*Fs)(nil)
+	// _ fs.DirMover = (*Fs)(nil)
+	_ fs.Object = (*Object)(nil)
 )
amazonclouddrive/amazonclouddrive_test.go

@@ -13,7 +13,7 @@ import (
 )

 func init() {
-	fstests.NilObject = fs.Object((*amazonclouddrive.FsObjectAcd)(nil))
+	fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
 	fstests.RemoteName = "TestAmazonCloudDrive:"
 }
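The rewritten `shouldRetry` above delegates to shared helpers, `fs.ShouldRetry` and `fs.ShouldRetryHTTP`, rather than duplicating the status-code and timeout checks in every backend. The helpers' bodies are not part of this diff; here is a minimal sketch of what they could look like, assuming only the signatures visible at the call site and mirroring the logic that was deleted above:

```go
package fs

import (
	"net"
	"net/http"
	"net/url"
)

// ShouldRetry reports whether err looks like a transient network
// timeout worth retrying. Sketch only - the real helper lives in the
// fs package and its body is not shown in this diff.
func ShouldRetry(err error) bool {
	if err == nil {
		return false
	}
	// Unwrap *url.Error to reach the underlying net.Error.
	if urlErr, ok := err.(*url.Error); ok {
		err = urlErr.Err
	}
	if netErr, ok := err.(net.Error); ok {
		return netErr.Timeout()
	}
	return false
}

// ShouldRetryHTTP reports whether the response status code is one of
// the codes the backend considers retryable.
func ShouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
	if resp == nil {
		return false
	}
	for _, e := range retryErrorCodes {
		if resp.StatusCode == e {
			return true
		}
	}
	return false
}
```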
appveyor.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\ncw\rclone

environment:
  GOPATH: c:\gopath

install:
  - go get golang.org/x/tools/cmd/vet
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go get -d ./...

build_script:
  - go vet ./...
  - go test -v -cpu=2 ./...
  - go test -cpu=2 -short -race ./...
dircache/dircache.go

@@ -12,9 +12,10 @@ import (
 // DirCache caches paths to directory IDs and vice versa
 type DirCache struct {
-	mu         sync.RWMutex
+	cacheMu    sync.RWMutex
 	cache      map[string]string
 	invCache   map[string]string
+	mu         sync.Mutex
 	fs         DirCacher // Interface to find and make stuff
 	trueRootID string    // ID of the absolute root
 	root       string    // the path we are working on
@@ -43,52 +44,36 @@ func New(root string, trueRootID string, fs DirCacher) *DirCache {
 	return d
 }

-// _get an ID given a path - without lock
-func (dc *DirCache) _get(path string) (id string, ok bool) {
-	id, ok = dc.cache[path]
-	return
-}
-
 // Get an ID given a path
 func (dc *DirCache) Get(path string) (id string, ok bool) {
-	dc.mu.RLock()
-	id, ok = dc._get(path)
-	dc.mu.RUnlock()
+	dc.cacheMu.RLock()
+	id, ok = dc.cache[path]
+	dc.cacheMu.RUnlock()
 	return
 }

 // GetInv gets a path given an ID
-func (dc *DirCache) GetInv(path string) (id string, ok bool) {
-	dc.mu.RLock()
-	id, ok = dc.invCache[path]
-	dc.mu.RUnlock()
+func (dc *DirCache) GetInv(id string) (path string, ok bool) {
+	dc.cacheMu.RLock()
+	path, ok = dc.invCache[id]
+	dc.cacheMu.RUnlock()
 	return
 }

-// _put a path, id into the map without lock
-func (dc *DirCache) _put(path, id string) {
-	dc.cache[path] = id
-	dc.invCache[id] = path
-}
-
 // Put a path, id into the map
 func (dc *DirCache) Put(path, id string) {
-	dc.mu.Lock()
-	dc._put(path, id)
-	dc.mu.Unlock()
+	dc.cacheMu.Lock()
+	dc.cache[path] = id
+	dc.invCache[id] = path
+	dc.cacheMu.Unlock()
 }

-// _flush the map of all data without lock
-func (dc *DirCache) _flush() {
-	dc.cache = make(map[string]string)
-	dc.invCache = make(map[string]string)
-}
-
 // Flush the map of all data
 func (dc *DirCache) Flush() {
-	dc.mu.Lock()
-	dc._flush()
-	dc.mu.Unlock()
+	dc.cacheMu.Lock()
+	dc.cache = make(map[string]string)
+	dc.invCache = make(map[string]string)
+	dc.cacheMu.Unlock()
 }

 // SplitPath splits a path into directory, leaf
@@ -120,8 +105,8 @@ func SplitPath(path string) (directory, leaf string) {
 // If not found strip the last path off the path and recurse
 // Now have a parent directory id, so look in the parent for self and return it
 func (dc *DirCache) FindDir(path string, create bool) (pathID string, err error) {
-	dc.mu.RLock()
-	defer dc.mu.RUnlock()
+	dc.mu.Lock()
+	defer dc.mu.Unlock()
 	return dc._findDir(path, create)
 }
@@ -135,7 +120,7 @@ func (dc *DirCache) _findDirInCache(path string) string {
 }

 	// If it is in the cache then return it
-	pathID, ok := dc._get(path)
+	pathID, ok := dc.Get(path)
 	if ok {
 		// fmt.Println("Cache hit on", path)
 		return pathID
@@ -146,10 +131,6 @@ func (dc *DirCache) _findDirInCache(path string) string {
 // Unlocked findDir - must have mu
 func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error) {
-	// if !dc.foundRoot {
-	// 	return "", fmt.Errorf("FindDir called before FindRoot")
-	// }
-
 	pathID = dc._findDirInCache(path)
 	if pathID != "" {
 		return pathID, nil
@@ -184,7 +165,7 @@ func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error
 }

 	// Store the leaf directory in the cache
-	dc._put(path, pathID)
+	dc.Put(path, pathID)

 	// fmt.Println("Dir", path, "is", pathID)
 	return pathID, nil
@@ -219,26 +200,32 @@ func (dc *DirCache) FindRoot(create bool) error {
 	if dc.foundRoot {
 		return nil
 	}
-	dc.foundRoot = true
 	rootID, err := dc._findDir(dc.root, create)
 	if err != nil {
-		dc.foundRoot = false
 		return err
 	}
+	dc.foundRoot = true
 	dc.rootID = rootID

 	// Find the parent of the root while we still have the root
 	// directory tree cached
 	rootParentPath, _ := SplitPath(dc.root)
-	dc.rootParentID, _ = dc._get(rootParentPath)
+	dc.rootParentID, _ = dc.Get(rootParentPath)

 	// Reset the tree based on dc.root
-	dc._flush()
+	dc.Flush()
 	// Put the root directory in
-	dc._put("", dc.rootID)
+	dc.Put("", dc.rootID)
 	return nil
 }

+// FoundRoot returns whether the root directory has been found yet
+//
+// Call this from FindLeaf or CreateDir only
+func (dc *DirCache) FoundRoot() bool {
+	return dc.foundRoot
+}
+
 // RootID returns the ID of the root directory
 //
 // This should be called after FindRoot
@@ -275,11 +262,11 @@ func (dc *DirCache) ResetRoot() {
 	dc.mu.Lock()
 	defer dc.mu.Unlock()
 	dc.foundRoot = false
-	dc._flush()
+	dc.Flush()

 	// Put the true root in
 	dc.rootID = dc.trueRootID

 	// Put the root directory in
-	dc._put("", dc.rootID)
+	dc.Put("", dc.rootID)
 }
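The locking change above splits DirCache's single RWMutex in two: `cacheMu`, a read-write lock guarding only the two maps, and `mu`, a plain mutex serializing whole directory lookups. That is what lets `FindDir` hold `mu` for the duration of a lookup while still calling the public `Get` and `Put` (which only take `cacheMu`) without deadlocking. A minimal sketch of the pattern, with the lookup body simplified away (the `DirCacher` plumbing from the diff is omitted):

```go
package dircache

import "sync"

// DirCache caches paths to directory IDs and vice versa.
type DirCache struct {
	cacheMu  sync.RWMutex      // guards the two maps only
	cache    map[string]string // path -> ID
	invCache map[string]string // ID -> path
	mu       sync.Mutex        // serializes whole lookups
}

// Get returns the ID for a path. Safe to call while mu is held,
// because it only takes cacheMu.
func (dc *DirCache) Get(path string) (id string, ok bool) {
	dc.cacheMu.RLock()
	id, ok = dc.cache[path]
	dc.cacheMu.RUnlock()
	return id, ok
}

// Put stores a path/ID pair in both directions under cacheMu.
func (dc *DirCache) Put(path, id string) {
	dc.cacheMu.Lock()
	dc.cache[path] = id
	dc.invCache[id] = path
	dc.cacheMu.Unlock()
}

// FindDir serializes lookups with mu, but can still call Get and Put
// without deadlocking since they use the separate cacheMu.
func (dc *DirCache) FindDir(path string) (string, bool) {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	return dc.Get(path)
}
```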
docs/content/about.md

@@ -19,6 +19,7 @@ Rclone is a command line program to sync files and directories to and from
   * Dropbox
   * Google Cloud Storage
   * Amazon Cloud Drive
+  * Microsoft One Drive
   * The local filesystem

 Features
docs/content/amazonclouddrive.md

@@ -4,7 +4,7 @@ description: "Rclone docs for Amazon Cloud Drive"
 date: "2015-09-06"
 ---

-<i class="fa fa-google"></i> Amazon Cloud Drive
+<i class="fa fa-amazon"></i> Amazon Cloud Drive
 -----------------------------------------

 Paths are specified as `remote:path`
@@ -37,9 +37,9 @@ Choose a number from below
  6) s3
  7) swift
 type> 1
-Amazon Application Client Id - leave blank to use rclone's.
+Amazon Application Client Id - leave blank normally.
 client_id>
-Amazon Application Client Secret - leave blank to use rclone's.
+Amazon Application Client Secret - leave blank normally.
 client_secret>
 Remote config
 If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
docs/content/authors.md

@@ -1,7 +1,7 @@
 ---
 title: "Authors"
 description: "Rclone Authors and Contributors"
-date: "2014-06-16"
+date: "2015-09-28"
 ---

 Authors
@@ -17,3 +17,4 @@ Contributors
   * Shimon Doodkin <helpmepro1@gmail.com>
   * Colin Nicholson <colin@colinn.com>
   * Klaus Post <klauspost@gmail.com>
+  * Sergey Tolmachev <tolsi.ru@gmail.com>
docs/content/changelog.md

@@ -1,12 +1,43 @@
 ---
 title: "Documentation"
 description: "Rclone Changelog"
-date: "2015-09-22"
+date: "2015-11-07"
 ---

 Changelog
 ---------

+* v1.24 - 2015-11-07
+  * New features
+    * Add support for Microsoft One Drive
+    * Add `--no-check-certificate` option to disable server certificate verification
+    * Add async readahead buffer for faster transfer of big files
+  * Fixes
+    * Allow spaces in remotes and check remote names for validity at creation time
+    * Allow '&' and disallow ':' in Windows filenames.
+  * Swift
+    * Ignore directory marker objects where appropriate - allows working with Hubic
+    * Don't delete the container if fs wasn't at root
+  * S3
+    * Don't delete the bucket if fs wasn't at root
+  * Google Cloud Storage
+    * Don't delete the bucket if fs wasn't at root
+* v1.23 - 2015-10-03
+  * New features
+    * Implement `rclone size` for measuring remotes
+  * Fixes
+    * Fix headless config for drive and gcs
+    * Tell the user they should try again if the webserver method failed
+    * Improve output of `--dump-headers`
+  * S3
+    * Allow anonymous access to public buckets
+  * Swift
+    * Stop chunked operations logging "Failed to read info: Object Not Found"
+    * Use Content-Length on uploads for extra reliability
+* v1.22 - 2015-09-28
+  * Implement rsync like include and exclude flags
+  * swift
+    * Support files > 5GB - thanks Sergey Tolmachev
 * v1.21 - 2015-09-22
   * New features
     * Display individual transfer progress
docs/content/docs.md

@@ -58,24 +58,29 @@ modification time or MD5SUM. Destination is updated to match
 source, including deleting files if necessary. Since this can
 cause data loss, test first with the `--dry-run` flag.

-### rclone ls [remote:path] ###
+### rclone ls remote:path ###

 List all the objects in the the path with size and path.

-### rclone lsd [remote:path] ###
+### rclone lsd remote:path ###

 List all directories/containers/buckets in the the path.

-### rclone lsl [remote:path] ###
+### rclone lsl remote:path ###

 List all the objects in the the path with modification time,
 size and path.

-### rclone md5sum [remote:path] ###
+### rclone md5sum remote:path ###

 Produces an md5sum file for all the objects in the path. This
 is in the same format as the standard md5sum tool produces.

+### rclone size remote:path ###
+
+Prints the total size of objects in remote:path and the number of
+objects.
+
 ### rclone mkdir remote:path ###

 Make the path if it doesn't already exist
@@ -288,3 +293,34 @@ here which are used for testing. These start with remote name eg

 ### --cpuprofile=FILE ###

 Write cpu profile to file. This can be analysed with `go tool pprof`.
+
+### --no-check-certificate=true/false ###
+
+`--no-check-certificate` controls whether a client verifies the
+server's certificate chain and host name.
+If `--no-check-certificate` is true, TLS accepts any certificate
+presented by the server and any host name in that certificate.
+In this mode, TLS is susceptible to man-in-the-middle attacks.
+
+This option defaults to `false`.
+
+**This should be used only for testing.**
+
+Filtering
+---------
+
+For the filtering options
+
+  * `--delete-excluded`
+  * `--filter`
+  * `--filter-from`
+  * `--exclude`
+  * `--exclude-from`
+  * `--include`
+  * `--include-from`
+  * `--files-from`
+  * `--min-size`
+  * `--max-size`
+  * `--dump-filters`
+
+See the [filtering section](/filtering/).
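The diff documents the flag but does not show how it is wired up internally. In Go's standard library, skipping certificate verification corresponds to setting `InsecureSkipVerify` on the client's TLS config; a sketch of that idea (not rclone's actual code, and `newClient` is a made-up helper name for illustration):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

// newClient returns an HTTP client which, when noCheckCertificate is
// true, accepts any certificate and any host name in it. This makes
// TLS vulnerable to man-in-the-middle attacks, so testing only.
func newClient(noCheckCertificate bool) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: noCheckCertificate,
			},
		},
	}
}

func main() {
	client := newClient(true) // equivalent in spirit to --no-check-certificate=true
	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```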
docs/content/downloads.md

@@ -2,38 +2,38 @@
 title: "Rclone downloads"
 description: "Download rclone binaries for your OS."
 type: page
-date: "2015-09-22"
+date: "2015-11-07"
 ---

-Rclone Download v1.21
+Rclone Download v1.24
 =====================

 * Windows
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.21-windows-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.21-windows-amd64.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-windows-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-windows-amd64.zip)
 * OSX
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.21-osx-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.21-osx-amd64.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-osx-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-osx-amd64.zip)
 * Linux
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.21-linux-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.21-linux-amd64.zip)
-  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.21-linux-arm.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-linux-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-linux-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.24-linux-arm.zip)
 * FreeBSD
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.21-freebsd-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.21-freebsd-amd64.zip)
-  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.21-freebsd-arm.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-freebsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-freebsd-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.24-freebsd-arm.zip)
 * NetBSD
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.21-netbsd-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.21-netbsd-amd64.zip)
-  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.21-netbsd-arm.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-netbsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-netbsd-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.24-netbsd-arm.zip)
 * OpenBSD
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.21-openbsd-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.21-openbsd-amd64.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-openbsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-openbsd-amd64.zip)
 * Plan 9
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.21-plan9-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.21-plan9-amd64.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-plan9-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-plan9-amd64.zip)
 * Solaris
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.21-solaris-amd64.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-solaris-amd64.zip)

 Downloads for scripting
 =======================
docs/content/drive.md

@@ -34,14 +34,14 @@ Choose a number from below
  3) local
  4) drive
 type> 4
-Google Application Client Id - leave blank to use rclone's.
+Google Application Client Id - leave blank normally.
 client_id>
-Google Application Client Secret - leave blank to use rclone's.
+Google Application Client Secret - leave blank normally.
 client_secret>
 Remote config
 Use auto config?
  * Say Y if not sure
- * Say N if you are working on a remote or headless machine
+ * Say N if you are working on a remote or headless machine or Y didn't work
 y) Yes
 n) No
 y/n> y
docs/content/dropbox.md

@@ -37,9 +37,9 @@ Choose a number from below
 5) dropbox
 6) drive
 type> 5
-Dropbox App Key - leave blank to use rclone's.
+Dropbox App Key - leave blank normally.
 app_key>
-Dropbox App Secret - leave blank to use rclone's.
+Dropbox App Secret - leave blank normally.
 app_secret>
 Remote config
 Please visit:
docs/content/faq.md

@@ -101,3 +101,39 @@ of metadata, which breaks the desired 1:1 mapping of files to objects.
 No, not at present. rclone only does uni-directional sync from A ->
 B. It may do in the future though since it has all the primitives - it
 just requires writing the algorithm to do it.

+### Can I use rclone with an HTTP proxy? ###
+
+Yes. rclone will use the environment variables `HTTP_PROXY`,
+`HTTPS_PROXY` and `NO_PROXY`, similar to cURL and other programs.
+
+`HTTPS_PROXY` takes precedence over `HTTP_PROXY` for https requests.
+
+The environment values may be either a complete URL or a "host[:port]",
+in which case the "http" scheme is assumed.
+
+The `NO_PROXY` variable allows you to disable the proxy for specific hosts.
+Hosts must be comma separated, and can contain domains or parts.
+For instance "foo.com" also matches "bar.foo.com".
+
+### Rclone gives x509: failed to load system roots and no roots provided error ###
+
+This means that `rclone` can't find the SSL root certificates. Likely
+you are running `rclone` on a NAS with a cut-down Linux OS.
+
+Rclone (via the Go runtime) tries to load the root certificates from
+these places on Linux.
+
+    "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
+    "/etc/pki/tls/certs/ca-bundle.crt",   // Fedora/RHEL
+    "/etc/ssl/ca-bundle.pem",             // OpenSUSE
+    "/etc/pki/tls/cacert.pem",            // OpenELEC
+
+So doing something like this should fix the problem. It also sets the
+time which is important for SSL to work properly.
+
+```
+mkdir -p /etc/ssl/certs/
+curl -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
+ntpclient -s -h pool.ntp.org
+```
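Go's net/http honours `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` through `http.ProxyFromEnvironment`, which is presumably how rclone gets the behaviour described in this FAQ entry. A sketch of a client configured that way:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// ProxyFromEnvironment consults HTTP_PROXY, HTTPS_PROXY and
	// NO_PROXY (or their lowercase forms) for each request.
	client := &http.Client{
		Transport: &http.Transport{Proxy: http.ProxyFromEnvironment},
	}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```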
docs/content/filtering.md (new file, 273 lines)
@@ -0,0 +1,273 @@
---
title: "Filtering"
description: "Filtering, includes and excludes"
date: "2015-09-27"
---

# Filtering, includes and excludes #

Rclone has a sophisticated set of include and exclude rules. Some of
these are based on patterns and some on other things like file size.

Each path as it passes through rclone is matched against the include
and exclude rules. The paths are matched without a leading `/`.

For example the files might be passed to the matching engine like this

  * `file1.jpg`
  * `file2.jpg`
  * `directory/file3.jpg`

## Patterns ##

The patterns used to match files for inclusion or exclusion are based
on "file globs" as used by the unix shell.

If the pattern starts with a `/` then it only matches at the top level
of the directory tree. If it doesn't start with `/` then it is
matched starting at the end of the path, but it will only match a
complete path element.

    file.jpg  - matches "file.jpg"
              - matches "directory/file.jpg"
              - doesn't match "afile.jpg"
              - doesn't match "directory/afile.jpg"
    /file.jpg - matches "file.jpg"
              - doesn't match "afile.jpg"
              - doesn't match "directory/file.jpg"

A `*` matches anything but not a `/`.

    *.jpg  - matches "file.jpg"
           - matches "directory/file.jpg"
           - doesn't match "file.jpg/anotherfile.png"

Use `**` to match anything, including slashes.

    dir/** - matches "dir/file.jpg"
           - matches "dir/dir1/dir2/file.jpg"
           - doesn't match "directory/file.jpg"
           - doesn't match "adir/file.jpg"

A `?` matches any character except a slash `/`.

    l?ss  - matches "less"
          - matches "lass"
          - doesn't match "floss"

A `[` and `]` together make a character class, such as `[a-z]` or
`[aeiou]` or `[[:alpha:]]`. See the [go regexp
docs](https://golang.org/pkg/regexp/syntax/) for more info on these.

    h[ae]llo - matches "hello"
             - matches "hallo"
             - doesn't match "hullo"

A `{` and `}` define a choice between elements. It should contain a
comma separated list of patterns, any of which might match. These
patterns can contain wildcards.

    {one,two}_potato - matches "one_potato"
                     - matches "two_potato"
                     - doesn't match "three_potato"
                     - doesn't match "_potato"

Special characters can be escaped with a `\` before them.

    \*.jpg      - matches "*.jpg"
    \\.jpg      - matches "\.jpg"
    \[one\].jpg - matches "[one].jpg"

### Differences between rsync and rclone patterns ###

Rclone implements bash style `{a,b,c}` glob matching which rsync doesn't.

Rclone ignores `/` at the end of a pattern.

Rclone always does a wildcard match so `\` must always escape a `\`.

## How the rules are used ##

Rclone maintains a list of include rules and exclude rules.

Each file is matched in order against the list until it finds a match.
The file is then included or excluded according to the rule type.

If the matcher falls off the bottom of the list then the path is
included.

For example given the following rules, `+` being include, `-` being
exclude,

    - secret*.jpg
    + *.jpg
    + *.png
    + file2.avi
    - *

This would include

  * `file1.jpg`
  * `file3.png`
  * `file2.avi`

This would exclude

  * `secret17.jpg`
  * non `*.jpg` and `*.png`

## Adding filtering rules ##

Filtering rules are added with the following command line flags.

### `--exclude` - Exclude files matching pattern ###

Add a single exclude rule with `--exclude`.

Eg `--exclude *.bak` to exclude all bak files from the sync.

### `--exclude-from` - Read exclude patterns from file ###

Add exclude rules from a file.

Prepare a file like this `exclude-file.txt`

    # a sample exclude rule file
    *.bak
    file2.jpg

Then use as `--exclude-from exclude-file.txt`. This will sync all
files except those ending in `bak` and `file2.jpg`.

This is useful if you have a lot of rules.

### `--include` - Include files matching pattern ###

Add a single include rule with `--include`.

Eg `--include *.{png,jpg}` to include all `png` and `jpg` files in the
backup and no others.

This adds an implicit `--exclude *` at the end of the filter list.

### `--include-from` - Read include patterns from file ###

Add include rules from a file.

Prepare a file like this `include-file.txt`

    # a sample include rule file
    *.jpg
    *.png
    file2.avi

Then use as `--include-from include-file.txt`. This will sync all
`jpg`, `png` files and `file2.avi`.

This is useful if you have a lot of rules.

This adds an implicit `--exclude *` at the end of the filter list.

### `--filter` - Add a file-filtering rule ###

This can be used to add a single include or exclude rule. Include
rules start with `+ ` and exclude rules start with `- `. A special
rule called `!` can be used to clear the existing rules.

Eg `--filter "- *.bak"` to exclude all bak files from the sync.

### `--filter-from` - Read filtering patterns from a file ###

Add include/exclude rules from a file.

Prepare a file like this `filter-file.txt`

    # a sample exclude rule file
    - secret*.jpg
    + *.jpg
    + *.png
    + file2.avi
    # exclude everything else
    - *

Then use as `--filter-from filter-file.txt`. The rules are processed
in the order that they are defined.

This example will include all `jpg` and `png` files, exclude any files
matching `secret*.jpg` and include `file2.avi`. Everything else will
be excluded from the sync.

### `--files-from` - Read list of source-file names ###

This reads a list of file names from the file passed in and **only**
these files are transferred. The filtering rules are ignored
completely if you use this option.

Prepare a file like this `files-from.txt`

    # comment
    file1.jpg
    file2.jpg

Then use as `--files-from files-from.txt`. This will only transfer
`file1.jpg` and `file2.jpg` providing they exist.

### `--min-size` - Don't transfer any file smaller than this ###

This option controls the minimum size file which will be transferred.
This defaults to `kBytes` but a suffix of `k`, `M`, or `G` can be
used.

For example `--min-size 50k` means no files smaller than 50kByte will be
transferred.

### `--max-size` - Don't transfer any file larger than this ###

This option controls the maximum size file which will be transferred.
This defaults to `kBytes` but a suffix of `k`, `M`, or `G` can be
used.

For example `--max-size 1G` means no files larger than 1GByte will be
transferred.

### `--delete-excluded` - Delete files on dest excluded from sync ###

**Important** this flag is dangerous - use with `--dry-run` and `-v` first.

When doing `rclone sync` this will delete any files which are excluded
from the sync on the destination.

If for example you did a sync from `A` to `B` without the `--min-size 50k` flag

    rclone sync A: B:

Then you repeated it like this with the `--delete-excluded`

    rclone --min-size 50k --delete-excluded sync A: B:

This would delete all files on `B` which are less than 50 kBytes as
these are now excluded from the sync.

Always test first with `--dry-run` and `-v` before using this flag.

### `--dump-filters` - dump the filters to the output ###

This dumps the defined filters to the output as regular expressions.

Useful for debugging.

## Quoting shell metacharacters ##

The examples above may not work verbatim in your shell as they have
shell metacharacters in them (eg `*`), and may require quoting.

Eg linux, OSX

  * `--include \*.jpg`
  * `--include '*.jpg'`
  * `--include='*.jpg'`

In Windows the expansion is done by the command not the shell so this
should work fine

  * `--include *.jpg`
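The first-match-wins rule order described under "How the rules are used" can be sketched in Go. This is an illustration of the documented behaviour, not rclone's implementation; `matches` is a stand-in for rclone's glob engine and uses `path.Match` on the leaf name only:

```go
package main

import (
	"fmt"
	"path"
)

// rule is one include (+) or exclude (-) pattern.
type rule struct {
	include bool
	pattern string
}

// matches is a simplified stand-in for rclone's glob matching:
// path.Match handles single-element globs like "*.jpg" against the
// leaf of the path.
func matches(pattern, file string) bool {
	ok, _ := path.Match(pattern, path.Base(file))
	return ok
}

// included applies the rules in order; the first matching rule wins,
// and a file that falls off the bottom of the list is included.
func included(rules []rule, file string) bool {
	for _, r := range rules {
		if matches(r.pattern, file) {
			return r.include
		}
	}
	return true
}

func main() {
	// The example rule list from the text above.
	rules := []rule{
		{false, "secret*.jpg"},
		{true, "*.jpg"},
		{true, "*.png"},
		{true, "file2.avi"},
		{false, "*"},
	}
	for _, f := range []string{"file1.jpg", "secret17.jpg", "file3.png", "other.txt"} {
		fmt.Println(f, included(rules, f))
	}
}
```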
docs/content/googlecloudstorage.md

@@ -35,9 +35,9 @@ Choose a number from below
 5) dropbox
 6) drive
 type> 4
-Google Application Client Id - leave blank to use rclone's.
+Google Application Client Id - leave blank normally.
 client_id>
-Google Application Client Secret - leave blank to use rclone's.
+Google Application Client Secret - leave blank normally.
 client_secret>
 Project number optional - needed only for list/create/delete buckets - see your developer console.
 project_number> 12345678
@@ -73,7 +73,7 @@ Remote config
 Remote config
 Use auto config?
  * Say Y if not sure
- * Say N if you are working on a remote or headless machine
+ * Say N if you are working on a remote or headless machine or Y didn't work
 y) Yes
 n) No
 y/n> y
docs/content/onedrive.md (new file, 111 lines)
@@ -0,0 +1,111 @@
---
title: "Microsoft One Drive"
description: "Rclone docs for Microsoft One Drive"
date: "2015-10-14"
---

<i class="fa fa-windows"></i> Microsoft One Drive
-----------------------------------------

Paths are specified as `remote:path`

Paths may be as deep as required, eg `remote:directory/subdirectory`.

The initial setup for One Drive involves getting a token from
Microsoft which you need to do in your browser. `rclone config` walks
you through it.

Here is an example of how to make a remote called `remote`. First run:

    rclone config

This will guide you through an interactive setup process:

```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) drive
3) dropbox
4) google cloud storage
5) local
6) onedrive
7) s3
8) swift
type> 6
Microsoft App Client Id - leave blank normally.
client_id>
Microsoft App Client Secret - leave blank normally.
client_secret>
Remote config
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
client_id =
client_secret =
token = {"access_token":"XXXXXX"}
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```

Note that rclone runs a webserver on your local machine to collect the
token as returned from Microsoft. This only runs from the moment it
opens your browser to the moment you get back the verification
code. This is on `http://127.0.0.1:53682/` and it may require
you to unblock it temporarily if you are running a host firewall.

Once configured you can then use `rclone` like this,

List directories in top level of your One Drive

    rclone lsd remote:

List all the files in your One Drive

    rclone ls remote:

To copy a local directory to a One Drive directory called backup

    rclone copy /home/source remote:backup

### Modified time and MD5SUMs ###

One Drive allows modification times to be set on objects accurate to 1
second. These will be used to detect whether objects need syncing or
not.

One drive does not support MD5SUMs. This means the `--checksum` flag
will be equivalent to the `--size-only` flag.

### Deleting files ###

Any files you delete with rclone will end up in the trash. Microsoft
doesn't provide an API to permanently delete files, nor to empty the
trash, so you will have to do that with one of Microsoft's apps or via
the One Drive website.

### Limitations ###

Note that One Drive is case insensitive so you can't have a
file called "Hello.doc" and one called "hello.doc".

Rclone only supports your default One Drive, and doesn't work with One
Drive for business. Both these issues may be fixed at some point
depending on user demand!

There are quite a few characters that can't be in One Drive file
names. These can't occur on Windows platforms, but on non-Windows
platforms they are common. Rclone will map these names to and from an
identical looking unicode equivalent. For example if a file has a `?`
in it will be mapped to `？` instead.
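The character mapping described in the last paragraph can be illustrated with a `strings.Replacer`. The actual table of characters rclone maps is not listed in this doc, so the three pairs below are assumptions for illustration only:

```go
package main

import (
	"fmt"
	"strings"
)

// toOneDrive swaps characters that are invalid in One Drive file
// names for full-width unicode look-alikes. Illustrative subset only;
// the real mapping table is rclone's and is not shown in this diff.
var toOneDrive = strings.NewReplacer(
	"?", "？", // U+FF1F FULLWIDTH QUESTION MARK
	"*", "＊", // U+FF0A FULLWIDTH ASTERISK
	":", "：", // U+FF1A FULLWIDTH COLON
)

func main() {
	fmt.Println(toOneDrive.Replace("what?.txt")) // prints "what？.txt"
}
```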
docs/content/overview.md

@@ -23,6 +23,7 @@ Here is an overview of the major features of each cloud storage system.
 | Dropbox              | No  | No  | Yes | No |
 | Google Cloud Storage | Yes | Yes | No  | No |
 | Amazon Cloud Drive   | Yes | No  | Yes | No |
+| Microsoft One Drive  | No  | Yes | Yes | No |
 | The local filesystem | Yes | Yes | Depends | No |

 ### MD5SUM ###
docs/content/s3.md

@@ -4,7 +4,7 @@ description: "Rclone docs for Amazon S3"
 date: "2014-04-26"
 ---

-<i class="fa fa-archive"></i> Amazon S3
+<i class="fa fa-amazon"></i> Amazon S3
 ---------------------------------------

 Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
@@ -133,6 +133,44 @@ created in. If you attempt to access a bucket from the wrong region,
 you will get an error, `incorrect region, the bucket is not in 'XXX'
 region`.

+### Anonymous access to public buckets ###
+
+If you want to use rclone to access a public bucket, configure with a
+blank `access_key_id` and `secret_access_key`. Eg
+
+```
+e) Edit existing remote
+n) New remote
+d) Delete remote
+q) Quit config
+e/n/d/q> n
+name> anons3
+What type of source is it?
+Choose a number from below
+1) amazon cloud drive
+2) drive
+3) dropbox
+4) google cloud storage
+5) local
+6) s3
+7) swift
+type> 6
+AWS Access Key ID - leave blank for anonymous access.
+access_key_id>
+AWS Secret Access Key (password) - leave blank for anonymous access.
+secret_access_key>
+Region to connect to.
+region> 1
+endpoint>
+location_constraint>
+```
+
+Then use it as normal with the name of the public bucket, eg
+
+    rclone lsd anons3:1000genomes
+
+You will be able to list and copy data but not upload it.
+
 ### Ceph ###

 Ceph is an object storage system which presents an Amazon S3 interface.
docs/layouts/chrome/navbar.html

@@ -17,6 +17,7 @@
 <ul class="dropdown-menu">
   <li><a href="/install/"><i class="fa fa-book"></i> Installation</a></li>
   <li><a href="/docs/"><i class="fa fa-book"></i> Usage</a></li>
+  <li><a href="/filtering/"><i class="fa fa-book"></i> Filtering</a></li>
   <li><a href="/changelog/"><i class="fa fa-book"></i> Changelog</a></li>
   <li><a href="/bugs/"><i class="fa fa-book"></i> Bugs</a></li>
   <li><a href="/faq/"><i class="fa fa-book"></i> FAQ</a></li>
@@ -31,11 +32,12 @@
 <ul class="dropdown-menu">
   <li><a href="/overview/"><i class="fa fa-archive"></i> Overview</a></li>
   <li><a href="/drive/"><i class="fa fa-google"></i> Drive</a></li>
-  <li><a href="/s3/"><i class="fa fa-archive"></i> S3</a></li>
+  <li><a href="/s3/"><i class="fa fa-amazon"></i> S3</a></li>
   <li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Swift</a></li>
   <li><a href="/dropbox/"><i class="fa fa-dropbox"></i> Dropbox</a></li>
   <li><a href="/googlecloudstorage/"><i class="fa fa-google"></i> Google Cloud Storage</a></li>
-  <li><a href="/amazonclouddrive/"><i class="fa fa-archive"></i> Amazon Cloud Drive</a></li>
+  <li><a href="/amazonclouddrive/"><i class="fa fa-amazon"></i> Amazon Cloud Drive</a></li>
+  <li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft One Drive</a></li>
   <li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
 </ul>
 </li>
docs/static/css/font-awesome.css (vendored, 542 changes)
@@ -1,22 +1,21 @@
/*!
 * Font Awesome 4.1.0 by @davegandy - http://fontawesome.io - @fontawesome
 * Font Awesome 4.4.0 by @davegandy - http://fontawesome.io - @fontawesome
 * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
 */
/* FONT PATH
 * -------------------------- */
@font-face {
  font-family: 'FontAwesome';
  src: url('../fonts/fontawesome-webfont.eot?v=4.1.0');
  src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.1.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff?v=4.1.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.1.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.1.0#fontawesomeregular') format('svg');
  src: url('../fonts/fontawesome-webfont.eot?v=4.4.0');
  src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.4.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff2?v=4.4.0') format('woff2'), url('../fonts/fontawesome-webfont.woff?v=4.4.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.4.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.4.0#fontawesomeregular') format('svg');
  font-weight: normal;
  font-style: normal;
}
.fa {
  display: inline-block;
  font-family: FontAwesome;
  font-style: normal;
  font-weight: normal;
  line-height: 1;
  font: normal normal normal 14px/1 FontAwesome;
  font-size: inherit;
  text-rendering: auto;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}
@@ -65,6 +64,19 @@
  border: solid 0.08em #eeeeee;
  border-radius: .1em;
}
.fa-pull-left {
  float: left;
}
.fa-pull-right {
  float: right;
}
.fa.fa-pull-left {
  margin-right: .3em;
}
.fa.fa-pull-right {
  margin-left: .3em;
}
/* Deprecated as of 4.4.0 */
.pull-right {
  float: right;
}
@@ -78,36 +90,24 @@
  margin-left: .3em;
}
.fa-spin {
  -webkit-animation: spin 2s infinite linear;
  -moz-animation: spin 2s infinite linear;
  -o-animation: spin 2s infinite linear;
  animation: spin 2s infinite linear;
  -webkit-animation: fa-spin 2s infinite linear;
  animation: fa-spin 2s infinite linear;
}
@-moz-keyframes spin {
  0% {
    -moz-transform: rotate(0deg);
  }
  100% {
    -moz-transform: rotate(359deg);
  }
.fa-pulse {
  -webkit-animation: fa-spin 1s infinite steps(8);
  animation: fa-spin 1s infinite steps(8);
}
@-webkit-keyframes spin {
@-webkit-keyframes fa-spin {
  0% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg);
  }
  100% {
    -webkit-transform: rotate(359deg);
    transform: rotate(359deg);
  }
}
@-o-keyframes spin {
  0% {
    -o-transform: rotate(0deg);
  }
  100% {
    -o-transform: rotate(359deg);
  }
}
@keyframes spin {
@keyframes fa-spin {
  0% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg);
@@ -120,43 +120,40 @@
.fa-rotate-90 {
  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);
  -webkit-transform: rotate(90deg);
  -moz-transform: rotate(90deg);
  -ms-transform: rotate(90deg);
  -o-transform: rotate(90deg);
  transform: rotate(90deg);
}
.fa-rotate-180 {
  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2);
  -webkit-transform: rotate(180deg);
  -moz-transform: rotate(180deg);
  -ms-transform: rotate(180deg);
  -o-transform: rotate(180deg);
  transform: rotate(180deg);
}
.fa-rotate-270 {
  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);
  -webkit-transform: rotate(270deg);
  -moz-transform: rotate(270deg);
  -ms-transform: rotate(270deg);
  -o-transform: rotate(270deg);
  transform: rotate(270deg);
}
.fa-flip-horizontal {
  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);
  -webkit-transform: scale(-1, 1);
  -moz-transform: scale(-1, 1);
  -ms-transform: scale(-1, 1);
  -o-transform: scale(-1, 1);
  transform: scale(-1, 1);
}
.fa-flip-vertical {
  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);
  -webkit-transform: scale(1, -1);
  -moz-transform: scale(1, -1);
  -ms-transform: scale(1, -1);
  -o-transform: scale(1, -1);
  transform: scale(1, -1);
}
:root .fa-rotate-90,
:root .fa-rotate-180,
:root .fa-rotate-270,
:root .fa-flip-horizontal,
:root .fa-flip-vertical {
  filter: none;
}
.fa-stack {
  position: relative;
  display: inline-block;
@@ -222,6 +219,8 @@
.fa-check:before {
  content: "\f00c";
}
.fa-remove:before,
.fa-close:before,
.fa-times:before {
  content: "\f00d";
}
@@ -551,7 +550,8 @@
.fa-arrows-h:before {
  content: "\f07e";
}
.fa-bar-chart-o:before {
.fa-bar-chart-o:before,
.fa-bar-chart:before {
  content: "\f080";
}
.fa-twitter-square:before {
@@ -627,6 +627,7 @@
.fa-twitter:before {
  content: "\f099";
}
.fa-facebook-f:before,
.fa-facebook:before {
  content: "\f09a";
}
@@ -639,6 +640,7 @@
.fa-credit-card:before {
  content: "\f09d";
}
.fa-feed:before,
.fa-rss:before {
  content: "\f09e";
}
@@ -1276,7 +1278,8 @@
.fa-male:before {
  content: "\f183";
}
.fa-gittip:before {
.fa-gittip:before,
.fa-gratipay:before {
  content: "\f184";
}
.fa-sun-o:before {
@@ -1380,7 +1383,6 @@
.fa-digg:before {
  content: "\f1a6";
}
.fa-pied-piper-square:before,
.fa-pied-piper:before {
  content: "\f1a7";
}
@@ -1497,6 +1499,7 @@
  content: "\f1cc";
}
.fa-life-bouy:before,
.fa-life-buoy:before,
.fa-life-saver:before,
.fa-support:before,
.fa-life-ring:before {
@@ -1519,6 +1522,8 @@
.fa-git:before {
  content: "\f1d3";
}
.fa-y-combinator-square:before,
.fa-yc-square:before,
.fa-hacker-news:before {
  content: "\f1d4";
}
@@ -1564,3 +1569,458 @@
.fa-bomb:before {
  content: "\f1e2";
}
.fa-soccer-ball-o:before,
.fa-futbol-o:before {
  content: "\f1e3";
}
.fa-tty:before {
  content: "\f1e4";
}
.fa-binoculars:before {
  content: "\f1e5";
}
.fa-plug:before {
  content: "\f1e6";
}
.fa-slideshare:before {
  content: "\f1e7";
}
.fa-twitch:before {
  content: "\f1e8";
}
.fa-yelp:before {
  content: "\f1e9";
}
.fa-newspaper-o:before {
  content: "\f1ea";
}
.fa-wifi:before {
  content: "\f1eb";
}
.fa-calculator:before {
  content: "\f1ec";
}
.fa-paypal:before {
  content: "\f1ed";
}
.fa-google-wallet:before {
  content: "\f1ee";
}
.fa-cc-visa:before {
  content: "\f1f0";
}
.fa-cc-mastercard:before {
  content: "\f1f1";
}
.fa-cc-discover:before {
  content: "\f1f2";
}
.fa-cc-amex:before {
  content: "\f1f3";
}
.fa-cc-paypal:before {
  content: "\f1f4";
}
.fa-cc-stripe:before {
  content: "\f1f5";
}
.fa-bell-slash:before {
  content: "\f1f6";
}
.fa-bell-slash-o:before {
  content: "\f1f7";
}
.fa-trash:before {
  content: "\f1f8";
}
.fa-copyright:before {
  content: "\f1f9";
}
.fa-at:before {
  content: "\f1fa";
}
.fa-eyedropper:before {
  content: "\f1fb";
}
.fa-paint-brush:before {
  content: "\f1fc";
}
.fa-birthday-cake:before {
  content: "\f1fd";
}
.fa-area-chart:before {
  content: "\f1fe";
}
.fa-pie-chart:before {
  content: "\f200";
}
.fa-line-chart:before {
  content: "\f201";
}
.fa-lastfm:before {
  content: "\f202";
}
.fa-lastfm-square:before {
  content: "\f203";
}
.fa-toggle-off:before {
  content: "\f204";
}
.fa-toggle-on:before {
  content: "\f205";
}
.fa-bicycle:before {
  content: "\f206";
}
.fa-bus:before {
  content: "\f207";
}
.fa-ioxhost:before {
  content: "\f208";
}
.fa-angellist:before {
  content: "\f209";
}
.fa-cc:before {
  content: "\f20a";
}
.fa-shekel:before,
.fa-sheqel:before,
.fa-ils:before {
  content: "\f20b";
}
.fa-meanpath:before {
  content: "\f20c";
}
.fa-buysellads:before {
  content: "\f20d";
}
.fa-connectdevelop:before {
  content: "\f20e";
}
.fa-dashcube:before {
  content: "\f210";
}
.fa-forumbee:before {
  content: "\f211";
}
.fa-leanpub:before {
  content: "\f212";
}
.fa-sellsy:before {
  content: "\f213";
}
.fa-shirtsinbulk:before {
  content: "\f214";
}
.fa-simplybuilt:before {
  content: "\f215";
}
.fa-skyatlas:before {
  content: "\f216";
}
.fa-cart-plus:before {
  content: "\f217";
}
.fa-cart-arrow-down:before {
  content: "\f218";
}
.fa-diamond:before {
  content: "\f219";
}
.fa-ship:before {
  content: "\f21a";
}
.fa-user-secret:before {
  content: "\f21b";
}
.fa-motorcycle:before {
  content: "\f21c";
}
.fa-street-view:before {
  content: "\f21d";
}
.fa-heartbeat:before {
  content: "\f21e";
}
.fa-venus:before {
  content: "\f221";
}
.fa-mars:before {
  content: "\f222";
}
.fa-mercury:before {
  content: "\f223";
}
.fa-intersex:before,
.fa-transgender:before {
  content: "\f224";
}
.fa-transgender-alt:before {
  content: "\f225";
}
.fa-venus-double:before {
  content: "\f226";
}
.fa-mars-double:before {
  content: "\f227";
}
.fa-venus-mars:before {
  content: "\f228";
}
.fa-mars-stroke:before {
  content: "\f229";
}
.fa-mars-stroke-v:before {
  content: "\f22a";
}
.fa-mars-stroke-h:before {
  content: "\f22b";
}
.fa-neuter:before {
  content: "\f22c";
}
.fa-genderless:before {
  content: "\f22d";
}
.fa-facebook-official:before {
  content: "\f230";
}
.fa-pinterest-p:before {
  content: "\f231";
}
.fa-whatsapp:before {
  content: "\f232";
}
.fa-server:before {
  content: "\f233";
}
.fa-user-plus:before {
  content: "\f234";
}
.fa-user-times:before {
  content: "\f235";
}
.fa-hotel:before,
.fa-bed:before {
  content: "\f236";
}
.fa-viacoin:before {
  content: "\f237";
}
.fa-train:before {
  content: "\f238";
}
.fa-subway:before {
  content: "\f239";
}
.fa-medium:before {
  content: "\f23a";
}
.fa-yc:before,
.fa-y-combinator:before {
  content: "\f23b";
}
.fa-optin-monster:before {
  content: "\f23c";
}
.fa-opencart:before {
  content: "\f23d";
}
.fa-expeditedssl:before {
  content: "\f23e";
}
.fa-battery-4:before,
.fa-battery-full:before {
  content: "\f240";
}
.fa-battery-3:before,
.fa-battery-three-quarters:before {
  content: "\f241";
}
.fa-battery-2:before,
.fa-battery-half:before {
  content: "\f242";
}
.fa-battery-1:before,
.fa-battery-quarter:before {
  content: "\f243";
}
.fa-battery-0:before,
.fa-battery-empty:before {
  content: "\f244";
}
.fa-mouse-pointer:before {
  content: "\f245";
}
.fa-i-cursor:before {
  content: "\f246";
}
.fa-object-group:before {
  content: "\f247";
}
.fa-object-ungroup:before {
  content: "\f248";
}
.fa-sticky-note:before {
  content: "\f249";
}
.fa-sticky-note-o:before {
  content: "\f24a";
}
.fa-cc-jcb:before {
  content: "\f24b";
}
.fa-cc-diners-club:before {
  content: "\f24c";
}
.fa-clone:before {
  content: "\f24d";
}
.fa-balance-scale:before {
  content: "\f24e";
}
.fa-hourglass-o:before {
  content: "\f250";
}
.fa-hourglass-1:before,
.fa-hourglass-start:before {
  content: "\f251";
}
.fa-hourglass-2:before,
.fa-hourglass-half:before {
  content: "\f252";
}
.fa-hourglass-3:before,
.fa-hourglass-end:before {
  content: "\f253";
}
.fa-hourglass:before {
  content: "\f254";
}
.fa-hand-grab-o:before,
.fa-hand-rock-o:before {
  content: "\f255";
}
.fa-hand-stop-o:before,
.fa-hand-paper-o:before {
  content: "\f256";
}
.fa-hand-scissors-o:before {
  content: "\f257";
}
.fa-hand-lizard-o:before {
  content: "\f258";
}
.fa-hand-spock-o:before {
  content: "\f259";
}
.fa-hand-pointer-o:before {
  content: "\f25a";
}
.fa-hand-peace-o:before {
  content: "\f25b";
}
.fa-trademark:before {
  content: "\f25c";
}
.fa-registered:before {
  content: "\f25d";
}
.fa-creative-commons:before {
  content: "\f25e";
}
.fa-gg:before {
  content: "\f260";
}
.fa-gg-circle:before {
  content: "\f261";
}
.fa-tripadvisor:before {
  content: "\f262";
}
.fa-odnoklassniki:before {
  content: "\f263";
}
.fa-odnoklassniki-square:before {
  content: "\f264";
}
.fa-get-pocket:before {
  content: "\f265";
}
.fa-wikipedia-w:before {
  content: "\f266";
}
.fa-safari:before {
  content: "\f267";
}
.fa-chrome:before {
  content: "\f268";
}
.fa-firefox:before {
  content: "\f269";
}
.fa-opera:before {
  content: "\f26a";
}
.fa-internet-explorer:before {
  content: "\f26b";
}
.fa-tv:before,
.fa-television:before {
  content: "\f26c";
}
.fa-contao:before {
  content: "\f26d";
}
.fa-500px:before {
  content: "\f26e";
}
.fa-amazon:before {
  content: "\f270";
}
.fa-calendar-plus-o:before {
  content: "\f271";
}
.fa-calendar-minus-o:before {
  content: "\f272";
}
.fa-calendar-times-o:before {
  content: "\f273";
}
.fa-calendar-check-o:before {
  content: "\f274";
}
.fa-industry:before {
  content: "\f275";
}
.fa-map-pin:before {
  content: "\f276";
}
.fa-map-signs:before {
  content: "\f277";
}
.fa-map-o:before {
  content: "\f278";
}
.fa-map:before {
  content: "\f279";
}
.fa-commenting:before {
  content: "\f27a";
}
.fa-commenting-o:before {
  content: "\f27b";
}
.fa-houzz:before {
  content: "\f27c";
}
.fa-vimeo:before {
  content: "\f27d";
}
.fa-black-tie:before {
  content: "\f27e";
}
.fa-fonticons:before {
  content: "\f280";
}
BIN docs/static/fonts/FontAwesome.otf vendored
Binary file not shown.
BIN docs/static/fonts/fontawesome-webfont.eot vendored (Executable file → Normal file)
Binary file not shown.
1064 docs/static/fonts/fontawesome-webfont.svg vendored (Executable file → Normal file)
File diff suppressed because it is too large. (Before: 248 KiB, after: 348 KiB.)
BIN docs/static/fonts/fontawesome-webfont.ttf vendored (Executable file → Normal file)
Binary file not shown.
BIN docs/static/fonts/fontawesome-webfont.woff vendored (Executable file → Normal file)
Binary file not shown.
BIN docs/static/fonts/fontawesome-webfont.woff2 vendored (Normal file)
Binary file not shown.
193 drive/drive.go
@@ -72,18 +72,18 @@ func init() {
        },
        Options: []fs.Option{{
            Name: oauthutil.ConfigClientID,
            Help: "Google Application Client Id - leave blank to use rclone's.",
            Help: "Google Application Client Id - leave blank normally.",
        }, {
            Name: oauthutil.ConfigClientSecret,
            Help: "Google Application Client Secret - leave blank to use rclone's.",
            Help: "Google Application Client Secret - leave blank normally.",
        }},
    })
    pflag.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
    pflag.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must be a power of 2 >= 256k.")
}

// FsDrive represents a remote drive server
type FsDrive struct {
// Fs represents a remote drive server
type Fs struct {
    name string         // name of this remote
    svc  *drive.Service // the connection to the drive server
    root string         // the path we are working on
@@ -93,60 +93,51 @@ type FsDrive struct {
    pacer *pacer.Pacer // To pace the API calls
}

// FsObjectDrive describes a drive object
type FsObjectDrive struct {
    drive        *FsDrive // what this object is part of
    remote       string   // The remote path
    id           string   // Drive Id of this object
    url          string   // Download URL of this object
    md5sum       string   // md5sum of the object
    bytes        int64    // size of the object
    modifiedDate string   // RFC3339 time it was last modified
// Object describes a drive object
type Object struct {
    fs           *Fs    // what this object is part of
    remote       string // The remote path
    id           string // Drive Id of this object
    url          string // Download URL of this object
    md5sum       string // md5sum of the object
    bytes        int64  // size of the object
    modifiedDate string // RFC3339 time it was last modified
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *FsDrive) Name() string {
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *FsDrive) Root() string {
func (f *Fs) Root() string {
    return f.root
}

// String converts this FsDrive to a string
func (f *FsDrive) String() string {
// String converts this Fs to a string
func (f *Fs) String() string {
    return fmt.Sprintf("Google drive root '%s'", f.root)
}

// shouldRetry determines whether a given err warrants being retried
func shouldRetry(err error) (again bool, errOut error) {
    again = false
    errOut = err
    if err != nil {
        // Check for net error Timeout()
        if x, ok := err.(interface {
            Timeout() bool
        }); ok && x.Timeout() {
        if fs.ShouldRetry(err) {
            again = true
        }
        // Check for net error Temporary()
        if x, ok := err.(interface {
            Temporary() bool
        }); ok && x.Temporary() {
            again = true
        }
        switch gerr := err.(type) {
        case *googleapi.Error:
            if gerr.Code >= 500 && gerr.Code < 600 {
                // All 5xx errors should be retried
                again = true
            } else if len(gerr.Errors) > 0 {
                reason := gerr.Errors[0].Reason
                if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
        } else {
            switch gerr := err.(type) {
            case *googleapi.Error:
                if gerr.Code >= 500 && gerr.Code < 600 {
                    // All 5xx errors should be retried
                    again = true
                } else if len(gerr.Errors) > 0 {
                    reason := gerr.Errors[0].Reason
                    if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
                        again = true
                    }
                }
            }
        }
    }
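
The refactored shouldRetry above folds the inline Timeout()/Temporary() checks into fs.ShouldRetry and then classifies Drive API errors (all 5xx, plus the rate-limit reasons) as retryable. As a minimal sketch of how such a classifier is typically consumed, with a hypothetical callWithRetry helper standing in for rclone's pacer and stand-in transience logic:

package main

import (
    "fmt"
    "time"
)

// retryable reports whether err looks transient, in the spirit of
// shouldRetry above (stand-in logic; the real function also inspects
// *googleapi.Error codes and reasons).
func retryable(err error) bool {
    if t, ok := err.(interface {
        Temporary() bool
    }); ok && t.Temporary() {
        return true
    }
    return false
}

// callWithRetry runs fn until it succeeds, fails permanently, or the
// attempt budget is spent. The fixed backoff is illustrative; a pacer
// would adapt the sleep to observed rate limits.
func callWithRetry(attempts int, fn func() error) error {
    var err error
    for i := 0; i < attempts; i++ {
        err = fn()
        if err == nil || !retryable(err) {
            return err
        }
        time.Sleep(time.Duration(i+1) * 100 * time.Millisecond)
    }
    return err
}

func main() {
    err := callWithRetry(3, func() error {
        return fmt.Errorf("pretend API failure") // not Temporary, so no retry
    })
    fmt.Println("final result:", err)
}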
@@ -170,7 +161,7 @@ type listAllFn func(*drive.File) bool
// If the user fn ever returns true then it early exits with found = true
//
// Search params: https://developers.google.com/drive/search-parameters
func (f *FsDrive) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
    query := fmt.Sprintf("trashed=false")
    if dirID != "" {
        query += fmt.Sprintf(" and '%s' in parents", dirID)
@@ -225,7 +216,7 @@ func isPowerOfTwo(x int64) bool {
    }
}

// NewFs constructs an FsDrive from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string) (fs.Fs, error) {
    if !isPowerOfTwo(int64(chunkSize)) {
        return nil, fmt.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
@@ -244,7 +235,7 @@ func NewFs(name, path string) (fs.Fs, error) {
        return nil, err
    }

    f := &FsDrive{
    f := &Fs{
        name:  name,
        root:  root,
        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
@@ -295,9 +286,9 @@ func NewFs(name, path string) (fs.Fs, error) {
}

// Return an FsObject from a path
func (f *FsDrive) newFsObjectWithInfoErr(remote string, info *drive.File) (fs.Object, error) {
    fs := &FsObjectDrive{
        drive: f,
func (f *Fs) newFsObjectWithInfoErr(remote string, info *drive.File) (fs.Object, error) {
    fs := &Object{
        fs:     f,
        remote: remote,
    }
    if info != nil {
@@ -315,7 +306,7 @@ func (f *FsDrive) newFsObjectWithInfoErr(remote string, info *drive.File) (fs.Ob
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) fs.Object {
func (f *Fs) newFsObjectWithInfo(remote string, info *drive.File) fs.Object {
    fs, _ := f.newFsObjectWithInfoErr(remote, info)
    // Errors have already been logged
    return fs
@@ -324,12 +315,12 @@ func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) fs.Object
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDrive) NewFsObject(remote string) fs.Object {
func (f *Fs) NewFsObject(remote string) fs.Object {
    return f.newFsObjectWithInfo(remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *FsDrive) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
    // Find the leaf in pathID
    found, err = f.listAll(pathID, leaf, true, false, func(item *drive.File) bool {
        if item.Title == leaf {
@@ -342,7 +333,7 @@ func (f *FsDrive) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, e
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *FsDrive) CreateDir(pathID, leaf string) (newID string, err error) {
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
    // fmt.Println("Making", path)
    // Define the metadata for the directory we are going to create.
    createInfo := &drive.File{
@@ -368,7 +359,7 @@ func (f *FsDrive) CreateDir(pathID, leaf string) (newID string, err error) {
//
// This fetches the minimum amount of stuff but does more API calls
// which makes it slow
func (f *FsDrive) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
    var subError error
    // Make the API request
    var wg sync.WaitGroup
@@ -416,7 +407,7 @@ func (f *FsDrive) listDirRecursive(dirID string, path string, out fs.ObjectsChan
//
// This is fast in terms of number of API calls, but slow in terms of
// fetching more data than it needs
func (f *FsDrive) listDirFull(dirID string, path string, out fs.ObjectsChan) error {
func (f *Fs) listDirFull(dirID string, path string, out fs.ObjectsChan) error {
    // Orphans waiting for their parent
    orphans := make(map[string][]*drive.File)

@@ -478,7 +469,7 @@ func (f *FsDrive) listDirFull(dirID string, path string, out fs.ObjectsChan) err
}

// List walks the path returning a channel of FsObjects
func (f *FsDrive) List() fs.ObjectsChan {
func (f *Fs) List() fs.ObjectsChan {
    out := make(fs.ObjectsChan, fs.Config.Checkers)
    go func() {
        defer close(out)
@@ -502,7 +493,7 @@ func (f *FsDrive) List() fs.ObjectsChan {
}

// ListDir walks the path returning a channel of directories
func (f *FsDrive) ListDir() fs.DirChan {
func (f *Fs) ListDir() fs.DirChan {
    out := make(fs.DirChan, fs.Config.Checkers)
    go func() {
        defer close(out)
@@ -531,13 +522,13 @@ func (f *FsDrive) ListDir() fs.DirChan {
}

// Creates a drive.File info from the parameters passed in and a half
// finished FsObjectDrive which must have setMetaData called on it
// finished Object which must have setMetaData called on it
//
// Used to create new objects
func (f *FsDrive) createFileInfo(remote string, modTime time.Time, size int64) (*FsObjectDrive, *drive.File, error) {
    // Temporary FsObject under construction
    o := &FsObjectDrive{
        drive: f,
func (f *Fs) createFileInfo(remote string, modTime time.Time, size int64) (*Object, *drive.File, error) {
    // Temporary Object under construction
    o := &Object{
        fs:     f,
        remote: remote,
        bytes:  size,
    }
@@ -567,7 +558,7 @@ func (f *FsDrive) createFileInfo(remote string, modTime time.Time, size int64) (
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
    o, createInfo, err := f.createFileInfo(remote, modTime, size)
    if err != nil {
        return nil, err
@@ -596,14 +587,14 @@ func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64
}

// Mkdir creates the container if it doesn't exist
func (f *FsDrive) Mkdir() error {
func (f *Fs) Mkdir() error {
    return f.dirCache.FindRoot(true)
}

// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *FsDrive) Rmdir() error {
func (f *Fs) Rmdir() error {
    err := f.dirCache.FindRoot(false)
    if err != nil {
        return err
@@ -638,7 +629,7 @@ func (f *FsDrive) Rmdir() error {
}

// Precision of the object storage system
func (f *FsDrive) Precision() time.Duration {
func (f *Fs) Precision() time.Duration {
    return time.Millisecond
}

@@ -651,8 +642,8 @@ func (f *FsDrive) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsDrive) Copy(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*FsObjectDrive)
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debug(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
@@ -664,8 +655,8 @@ func (f *FsDrive) Copy(src fs.Object, remote string) (fs.Object, error) {
    }

    var info *drive.File
    err = o.drive.pacer.Call(func() (bool, error) {
        info, err = o.drive.svc.Files.Copy(srcObj.id, createInfo).Do()
    err = o.fs.pacer.Call(func() (bool, error) {
        info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Do()
        return shouldRetry(err)
    })
    if err != nil {
@@ -681,7 +672,7 @@ func (f *FsDrive) Copy(src fs.Object, remote string) (fs.Object, error) {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsDrive) Purge() error {
func (f *Fs) Purge() error {
    if f.root == "" {
        return fmt.Errorf("Can't purge root directory")
    }
@@ -713,8 +704,8 @@ func (f *FsDrive) Purge() error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *FsDrive) Move(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*FsObjectDrive)
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debug(src, "Can't move - not same remote type")
        return nil, fs.ErrorCantMove
@@ -744,8 +735,8 @@ func (f *FsDrive) Move(src fs.Object, remote string) (fs.Object, error) {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *FsDrive) DirMove(src fs.Fs) error {
    srcFs, ok := src.(*FsDrive)
func (f *Fs) DirMove(src fs.Fs) error {
    srcFs, ok := src.(*Fs)
    if !ok {
        fs.Debug(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
@@ -780,12 +771,12 @@ func (f *FsDrive) DirMove(src fs.Fs) error {
// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *FsObjectDrive) Fs() fs.Fs {
    return o.drive
func (o *Object) Fs() fs.Fs {
    return o.fs
}

// Return a string version
func (o *FsObjectDrive) String() string {
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
@@ -793,22 +784,22 @@ func (o *FsObjectDrive) String() string {
}

// Remote returns the remote path
func (o *FsObjectDrive) Remote() string {
func (o *Object) Remote() string {
    return o.remote
}

// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectDrive) Md5sum() (string, error) {
func (o *Object) Md5sum() (string, error) {
    return o.md5sum, nil
}

// Size returns the size of an object in bytes
func (o *FsObjectDrive) Size() int64 {
func (o *Object) Size() int64 {
    return o.bytes
}

// setMetaData sets the fs data from a drive.File
func (o *FsObjectDrive) setMetaData(info *drive.File) {
func (o *Object) setMetaData(info *drive.File) {
    o.id = info.Id
    o.url = info.DownloadUrl
    o.md5sum = strings.ToLower(info.Md5Checksum)
@@ -817,17 +808,17 @@ func (o *FsObjectDrive) setMetaData(info *drive.File) {
}

// readMetaData gets the info if it hasn't already been fetched
func (o *FsObjectDrive) readMetaData() (err error) {
func (o *Object) readMetaData() (err error) {
    if o.id != "" {
        return nil
    }

    leaf, directoryID, err := o.drive.dirCache.FindPath(o.remote, false)
    leaf, directoryID, err := o.fs.dirCache.FindPath(o.remote, false)
    if err != nil {
        return err
    }

    found, err := o.drive.listAll(directoryID, leaf, false, true, func(item *drive.File) bool {
    found, err := o.fs.listAll(directoryID, leaf, false, true, func(item *drive.File) bool {
        if item.Title == leaf {
            o.setMetaData(item)
            return true
@@ -849,7 +840,7 @@ func (o *FsObjectDrive) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectDrive) ModTime() time.Time {
func (o *Object) ModTime() time.Time {
    err := o.readMetaData()
    if err != nil {
        fs.Log(o, "Failed to read metadata: %s", err)
@@ -864,7 +855,7 @@ func (o *FsObjectDrive) ModTime() time.Time {
}

// SetModTime sets the modification time of the drive fs object
func (o *FsObjectDrive) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) {
    err := o.readMetaData()
    if err != nil {
        fs.Stats.Error()
@@ -877,8 +868,8 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) {
    }
    // Set modified date
    var info *drive.File
    err = o.drive.pacer.Call(func() (bool, error) {
        info, err = o.drive.svc.Files.Update(o.id, updateInfo).SetModifiedDate(true).Do()
    err = o.fs.pacer.Call(func() (bool, error) {
        info, err = o.fs.svc.Files.Update(o.id, updateInfo).SetModifiedDate(true).Do()
        return shouldRetry(err)
    })
    if err != nil {
@@ -891,12 +882,12 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) {
}

// Storable returns a boolean as to whether this object is storable
func (o *FsObjectDrive) Storable() bool {
func (o *Object) Storable() bool {
    return true
}

// Open an object for read
func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
func (o *Object) Open() (in io.ReadCloser, err error) {
    if o.url == "" {
        return nil, fmt.Errorf("Forbidden to download - check sharing permission")
    }
@@ -906,8 +897,8 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
    }
    req.Header.Set("User-Agent", fs.UserAgent)
    var res *http.Response
    err = o.drive.pacer.Call(func() (bool, error) {
        res, err = o.drive.client.Do(req)
    err = o.fs.pacer.Call(func() (bool, error) {
        res, err = o.fs.client.Do(req)
        return shouldRetry(err)
    })
    if err != nil {
@@ -925,7 +916,7 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) error {
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
    updateInfo := &drive.File{
        Id:           o.id,
        ModifiedDate: modTime.Format(timeFormatOut),
@@ -936,8 +927,8 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro
    var info *drive.File
    if size == 0 || size < int64(driveUploadCutoff) {
        // Don't retry, return a retry error instead
        err = o.drive.pacer.CallNoRetry(func() (bool, error) {
            info, err = o.drive.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in).Do()
        err = o.fs.pacer.CallNoRetry(func() (bool, error) {
            info, err = o.fs.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in).Do()
            return shouldRetry(err)
        })
        if err != nil {
@@ -945,7 +936,7 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro
    }
    } else {
        // Upload the file in chunks
        info, err = o.drive.Upload(in, size, fs.MimeType(o), updateInfo, o.remote)
        info, err = o.fs.Upload(in, size, fs.MimeType(o), updateInfo, o.remote)
        if err != nil {
            return err
        }
@@ -955,13 +946,13 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro
}

// Remove an object
func (o *FsObjectDrive) Remove() error {
func (o *Object) Remove() error {
    var err error
    err = o.drive.pacer.Call(func() (bool, error) {
    err = o.fs.pacer.Call(func() (bool, error) {
        if *driveUseTrash {
            _, err = o.drive.svc.Files.Trash(o.id).Do()
            _, err = o.fs.svc.Files.Trash(o.id).Do()
        } else {
            err = o.drive.svc.Files.Delete(o.id).Do()
            err = o.fs.svc.Files.Delete(o.id).Do()
        }
        return shouldRetry(err)
    })
@@ -970,10 +961,10 @@ func (o *FsObjectDrive) Remove() error {

// Check the interfaces are satisfied
var (
    _ fs.Fs       = (*FsDrive)(nil)
    _ fs.Purger   = (*FsDrive)(nil)
    _ fs.Copier   = (*FsDrive)(nil)
    _ fs.Mover    = (*FsDrive)(nil)
    _ fs.DirMover = (*FsDrive)(nil)
    _ fs.Object   = (*FsObjectDrive)(nil)
    _ fs.Fs       = (*Fs)(nil)
    _ fs.Purger   = (*Fs)(nil)
    _ fs.Copier   = (*Fs)(nil)
    _ fs.Mover    = (*Fs)(nil)
    _ fs.DirMover = (*Fs)(nil)
    _ fs.Object   = (*Object)(nil)
)
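
The var block that closes drive.go is the standard Go idiom for compile-time interface checks: assigning a typed nil to the blank identifier forces the compiler to verify that the type implements each interface, at zero runtime cost. A small self-contained sketch of the same idiom, with hypothetical Walker/Dog names:

package main

import "fmt"

// Walker is a stand-in for an interface like fs.Fs.
type Walker interface {
    Walk() string
}

// Dog is a stand-in for a concrete type like the Fs above.
type Dog struct{}

// Walk implements Walker.
func (*Dog) Walk() string { return "walking" }

// The blank assignment compiles to nothing at runtime, but the build
// breaks immediately if *Dog ever stops satisfying Walker.
var _ Walker = (*Dog)(nil)

func main() {
    var w Walker = &Dog{}
    fmt.Println(w.Walk())
}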
@@ -13,7 +13,7 @@ import (
)

func init() {
    fstests.NilObject = fs.Object((*drive.FsObjectDrive)(nil))
    fstests.NilObject = fs.Object((*drive.Object)(nil))
    fstests.RemoteName = "TestDrive:"
}
@@ -36,7 +36,7 @@ const (
// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
    f      *FsDrive
    f      *Fs
    remote string
    // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
    URI string
@@ -51,7 +51,7 @@ type resumableUpload struct {
}

// Upload the io.Reader in of size bytes with contentType and info
func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) {
func (f *Fs) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) {
    fileID := info.Id
    var body io.Reader
    body, err := googleapi.WithoutDataWrapper.JSONReader(info)
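
resumableUpload drives Google's resumable upload protocol, where the file body is sent to a server-provided session URI in chunks. As a rough sketch of the chunked PUT loop such a session performs (an illustrative helper, not the code in upload.go; real code must also handle the server's reported offset and retries):

package upload

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
)

// uploadChunks PUTs r to a resumable session URI chunk by chunk. Each
// request carries a Content-Range header; the server answers 308 until
// the final chunk completes with 200/201. uploadURI and chunkSize are
// assumptions for the sketch.
func uploadChunks(client *http.Client, uploadURI string, r io.Reader, total, chunkSize int64) error {
    buf := make([]byte, chunkSize)
    var pos int64
    for pos < total {
        n, err := io.ReadFull(r, buf)
        if err != nil && err != io.ErrUnexpectedEOF {
            return err
        }
        req, err := http.NewRequest("PUT", uploadURI, bytes.NewReader(buf[:n]))
        if err != nil {
            return err
        }
        req.Header.Set("Content-Range",
            fmt.Sprintf("bytes %d-%d/%d", pos, pos+int64(n)-1, total))
        res, err := client.Do(req)
        if err != nil {
            return err
        }
        res.Body.Close()
        // 308 means "send the next chunk"; 200/201 means the upload is done.
        if res.StatusCode != 308 && res.StatusCode != 200 && res.StatusCode != 201 {
            return fmt.Errorf("unexpected status %d", res.StatusCode)
        }
        pos += int64(n)
    }
    return nil
}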
@@ -50,10 +50,10 @@ func init() {
    Config: configHelper,
    Options: []fs.Option{{
        Name: "app_key",
        Help: "Dropbox App Key - leave blank to use rclone's.",
        Help: "Dropbox App Key - leave blank normally.",
    }, {
        Name: "app_secret",
        Help: "Dropbox App Secret - leave blank to use rclone's.",
        Help: "Dropbox App Secret - leave blank normally.",
    }},
})
pflag.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
@@ -92,8 +92,8 @@ func configHelper(name string) {
    }
}

// FsDropbox represents a remote dropbox server
type FsDropbox struct {
// Fs represents a remote dropbox server
type Fs struct {
    name string           // name of this remote
    db   *dropbox.Dropbox // the connection to the dropbox server
    root string           // the path we are working on
@@ -101,29 +101,29 @@ type FsDropbox struct {
    slashRootSlash string // root with "/" prefix and postfix, lowercase
}

// FsObjectDropbox describes a dropbox object
type FsObjectDropbox struct {
    dropbox     *FsDropbox // what this object is part of
    remote      string     // The remote path
    bytes       int64      // size of the object
    modTime     time.Time  // time it was last modified
    hasMetadata bool       // metadata is valid
// Object describes a dropbox object
type Object struct {
    fs          *Fs       // what this object is part of
    remote      string    // The remote path
    bytes       int64     // size of the object
    modTime     time.Time // time it was last modified
    hasMetadata bool      // metadata is valid
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *FsDropbox) Name() string {
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *FsDropbox) Root() string {
func (f *Fs) Root() string {
    return f.root
}

// String converts this FsDropbox to a string
func (f *FsDropbox) String() string {
// String converts this Fs to a string
func (f *Fs) String() string {
    return fmt.Sprintf("Dropbox root '%s'", f.root)
}

@@ -144,7 +144,7 @@ func newDropbox(name string) (*dropbox.Dropbox, error) {
    return db, err
}

// NewFs constructs an FsDropbox from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
    if uploadChunkSize > maxUploadChunkSize {
        return nil, fmt.Errorf("Chunk size too big, must be < %v", maxUploadChunkSize)
@@ -153,7 +153,7 @@ func NewFs(name, root string) (fs.Fs, error) {
    if err != nil {
        return nil, err
    }
    f := &FsDropbox{
    f := &Fs{
        name: name,
        db:   db,
    }
@@ -186,7 +186,7 @@ func NewFs(name, root string) (fs.Fs, error) {
}

// Sets root in f
func (f *FsDropbox) setRoot(root string) {
func (f *Fs) setRoot(root string) {
    f.root = strings.Trim(root, "/")
    lowerCaseRoot := strings.ToLower(f.root)

@@ -200,10 +200,10 @@ func (f *FsDropbox) setRoot(root string) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
    o := &FsObjectDropbox{
        dropbox: f,
        remote:  remote,
func (f *Fs) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
    o := &Object{
        fs:     f,
        remote: remote,
    }
    if info != nil {
        o.setMetadataFromEntry(info)
@@ -220,12 +220,12 @@ func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.O
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) NewFsObject(remote string) fs.Object {
func (f *Fs) NewFsObject(remote string) fs.Object {
    return f.newFsObjectWithInfo(remote, nil)
}

// Strips the root off path and returns it
func (f *FsDropbox) stripRoot(path string) *string {
func (f *Fs) stripRoot(path string) *string {
    lowercase := strings.ToLower(path)

    if !strings.HasPrefix(lowercase, f.slashRootSlash) {
@@ -239,7 +239,7 @@ func (f *FsDropbox) stripRoot(path string) *string {
}

// Walk the root returning a channel of FsObjects
func (f *FsDropbox) list(out fs.ObjectsChan) {
func (f *Fs) list(out fs.ObjectsChan) {
    // Track path component case, it could be different for entries coming from DropBox API
    // See https://www.dropboxforum.com/hc/communities/public/questions/201665409-Wrong-character-case-of-folder-name-when-calling-listFolder-using-Sync-API?locale=en-us
    // and https://github.com/ncw/rclone/issues/53
@@ -318,7 +318,7 @@ func (f *FsDropbox) list(out fs.ObjectsChan) {
}

// List walks the path returning a channel of FsObjects
func (f *FsDropbox) List() fs.ObjectsChan {
func (f *Fs) List() fs.ObjectsChan {
    out := make(fs.ObjectsChan, fs.Config.Checkers)
    go func() {
        defer close(out)
@@ -328,7 +328,7 @@ func (f *FsDropbox) List() fs.ObjectsChan {
}

// ListDir walks the path returning a channel of FsObjects
func (f *FsDropbox) ListDir() fs.DirChan {
func (f *Fs) ListDir() fs.DirChan {
    out := make(fs.DirChan, fs.Config.Checkers)
    go func() {
        defer close(out)
@@ -379,14 +379,17 @@ func (rc *readCloser) Close() error {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsDropbox) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
    // Temporary FsObject under construction
    o := &FsObjectDropbox{dropbox: f, remote: remote}
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
    // Temporary Object under construction
    o := &Object{
        fs:     f,
        remote: remote,
    }
    return o, o.Update(in, modTime, size)
}

// Mkdir creates the container if it doesn't exist
func (f *FsDropbox) Mkdir() error {
func (f *Fs) Mkdir() error {
    entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
    if err == nil {
        if entry.IsDir {
@@ -401,7 +404,7 @@ func (f *FsDropbox) Mkdir() error {
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *FsDropbox) Rmdir() error {
func (f *Fs) Rmdir() error {
    entry, err := f.db.Metadata(f.slashRoot, true, false, "", "", 16)
    if err != nil {
        return err
@@ -413,7 +416,7 @@ func (f *FsDropbox) Rmdir() error {
}

// Precision returns the precision
func (f *FsDropbox) Precision() time.Duration {
func (f *Fs) Precision() time.Duration {
    return fs.ModTimeNotSupported
}

@@ -426,15 +429,18 @@ func (f *FsDropbox) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsDropbox) Copy(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*FsObjectDropbox)
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debug(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }

    // Temporary FsObject under construction
    dstObj := &FsObjectDropbox{dropbox: f, remote: remote}
    // Temporary Object under construction
    dstObj := &Object{
        fs:     f,
        remote: remote,
    }

    srcPath := srcObj.remotePath()
    dstPath := dstObj.remotePath()
@@ -451,7 +457,7 @@ func (f *FsDropbox) Copy(src fs.Object, remote string) (fs.Object, error) {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsDropbox) Purge() error {
func (f *Fs) Purge() error {
    // Let dropbox delete the filesystem tree
    _, err := f.db.Delete(f.slashRoot)
    return err
@@ -466,15 +472,18 @@ func (f *FsDropbox) Purge() error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *FsDropbox) Move(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*FsObjectDropbox)
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debug(src, "Can't move - not same remote type")
        return nil, fs.ErrorCantMove
    }

    // Temporary FsObject under construction
    dstObj := &FsObjectDropbox{dropbox: f, remote: remote}
    // Temporary Object under construction
    dstObj := &Object{
        fs:     f,
        remote: remote,
    }

    srcPath := srcObj.remotePath()
    dstPath := dstObj.remotePath()
@@ -493,8 +502,8 @@ func (f *FsDropbox) Move(src fs.Object, remote string) (fs.Object, error) {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *FsDropbox) DirMove(src fs.Fs) error {
    srcFs, ok := src.(*FsDropbox)
func (f *Fs) DirMove(src fs.Fs) error {
    srcFs, ok := src.(*Fs)
    if !ok {
        fs.Debug(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
@@ -517,12 +526,12 @@ func (f *FsDropbox) DirMove(src fs.Fs) error {
// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *FsObjectDropbox) Fs() fs.Fs {
    return o.dropbox
func (o *Object) Fs() fs.Fs {
    return o.fs
}

// Return a string version
func (o *FsObjectDropbox) String() string {
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
@@ -530,32 +539,32 @@ func (o *FsObjectDropbox) String() string {
}

// Remote returns the remote path
func (o *FsObjectDropbox) Remote() string {
func (o *Object) Remote() string {
    return o.remote
}

// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectDropbox) Md5sum() (string, error) {
func (o *Object) Md5sum() (string, error) {
    return "", nil
}

// Size returns the size of an object in bytes
func (o *FsObjectDropbox) Size() int64 {
func (o *Object) Size() int64 {
    return o.bytes
}

// setMetadataFromEntry sets the fs data from a dropbox.Entry
//
// This isn't a complete set of metadata and has an inaccurate date
func (o *FsObjectDropbox) setMetadataFromEntry(info *dropbox.Entry) {
func (o *Object) setMetadataFromEntry(info *dropbox.Entry) {
    o.bytes = info.Bytes
    o.modTime = time.Time(info.ClientMtime)
    o.hasMetadata = true
}

// Reads the entry from dropbox
func (o *FsObjectDropbox) readEntry() (*dropbox.Entry, error) {
    entry, err := o.dropbox.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit)
func (o *Object) readEntry() (*dropbox.Entry, error) {
    entry, err := o.fs.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit)
    if err != nil {
        fs.Debug(o, "Error reading file: %s", err)
        return nil, fmt.Errorf("Error reading file: %s", err)
@@ -564,7 +573,7 @@ func (o *FsObjectDropbox) readEntry() (*dropbox.Entry, error) {
}

// Read entry if not set and set metadata from it
func (o *FsObjectDropbox) readEntryAndSetMetadata() error {
func (o *Object) readEntryAndSetMetadata() error {
    // Last resort set time from client
    if !o.modTime.IsZero() {
        return nil
@@ -578,8 +587,8 @@ func (o *FsObjectDropbox) readEntryAndSetMetadata() error {
}

// Returns the remote path for the object
func (o *FsObjectDropbox) remotePath() string {
    return o.dropbox.slashRootSlash + o.remote
func (o *Object) remotePath() string {
    return o.fs.slashRootSlash + o.remote
}

// Returns the key for the metadata database for a given path
@@ -592,12 +601,12 @@ func metadataKey(path string) string {
}

// Returns the key for the metadata database
func (o *FsObjectDropbox) metadataKey() string {
func (o *Object) metadataKey() string {
    return metadataKey(o.remotePath())
}

// readMetaData gets the info if it hasn't already been fetched
func (o *FsObjectDropbox) readMetaData() (err error) {
func (o *Object) readMetaData() (err error) {
    if o.hasMetadata {
        return nil
    }
@@ -609,7 +618,7 @@ func (o *FsObjectDropbox) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectDropbox) ModTime() time.Time {
func (o *Object) ModTime() time.Time {
    err := o.readMetaData()
    if err != nil {
        fs.Log(o, "Failed to read metadata: %s", err)
@@ -621,19 +630,19 @@ func (o *FsObjectDropbox) ModTime() time.Time {
// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
func (o *FsObjectDropbox) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) {
    // FIXME not implemented
    return
}

// Storable returns whether this object is storable
func (o *FsObjectDropbox) Storable() bool {
func (o *Object) Storable() bool {
    return true
}

// Open an object for read
func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
    in, _, err = o.dropbox.db.Download(o.remotePath(), "", 0)
func (o *Object) Open() (in io.ReadCloser, err error) {
    in, _, err = o.fs.db.Download(o.remotePath(), "", 0)
    return
}

@@ -642,13 +651,13 @@ func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) error {
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
    remote := o.remotePath()
    if ignoredFiles.MatchString(remote) {
        fs.ErrorLog(o, "File name disallowed - not uploading")
        return nil
    }
    entry, err := o.dropbox.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")
    entry, err := o.fs.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")
    if err != nil {
        return fmt.Errorf("Upload failed: %s", err)
    }
@@ -657,17 +666,17 @@ func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) er
}

// Remove an object
func (o *FsObjectDropbox) Remove() error {
    _, err := o.dropbox.db.Delete(o.remotePath())
func (o *Object) Remove() error {
    _, err := o.fs.db.Delete(o.remotePath())
    return err
}

// Check the interfaces are satisfied
var (
    _ fs.Fs       = (*FsDropbox)(nil)
    _ fs.Copier   = (*FsDropbox)(nil)
    _ fs.Purger   = (*FsDropbox)(nil)
    _ fs.Mover    = (*FsDropbox)(nil)
    _ fs.DirMover = (*FsDropbox)(nil)
    _ fs.Object   = (*FsObjectDropbox)(nil)
    _ fs.Fs       = (*Fs)(nil)
    _ fs.Copier   = (*Fs)(nil)
    _ fs.Purger   = (*Fs)(nil)
    _ fs.Mover    = (*Fs)(nil)
    _ fs.DirMover = (*Fs)(nil)
    _ fs.Object   = (*Object)(nil)
)
@@ -13,7 +13,7 @@ import (
)

func init() {
    fstests.NilObject = fs.Object((*dropbox.FsObjectDropbox)(nil))
    fstests.NilObject = fs.Object((*dropbox.Object)(nil))
    fstests.RemoteName = "TestDropbox:"
}
@@ -256,6 +256,7 @@ type Account struct {
    lpTime  time.Time          // Time of last average measurement
    lpBytes int                // Number of bytes read since last measurement
    avg     ewma.MovingAverage // Moving average of last few measurements
    closed  bool               // set if the file is closed
    exit    chan struct{}      // channel that will be closed when transfer is finished
}

@@ -408,9 +409,13 @@ func (file *Account) String() string {

// Close the object
func (file *Account) Close() error {
    close(file.exit)
    file.mu.Lock()
    defer file.mu.Unlock()
    if file.closed {
        return nil
    }
    file.closed = true
    close(file.exit)
    Stats.inProgress.clear(file.name)
    return file.in.Close()
}
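
This Close fix exists because closing an already-closed channel panics in Go; taking the mutex first and recording a closed flag makes Close idempotent. The same guard in isolation, as a runnable sketch (the closer type and names are illustrative):

package main

import (
    "fmt"
    "sync"
)

// closer shows the guard pattern from the Account.Close fix: a mutex
// plus a closed flag make Close safe to call more than once.
type closer struct {
    mu     sync.Mutex
    closed bool
    exit   chan struct{}
}

func (c *closer) Close() error {
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.closed {
        return nil // second Close is a no-op instead of a panic
    }
    c.closed = true
    close(c.exit)
    return nil
}

func main() {
    c := &closer{exit: make(chan struct{})}
    fmt.Println(c.Close(), c.Close()) // <nil> <nil>, no panic
}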
199 fs/buffer.go (Normal file)
@@ -0,0 +1,199 @@
package fs

import (
	"fmt"
	"io"
)

// asyncReader will do async read-ahead from the input reader
// and make the data available as an io.Reader.
// This should be fully transparent, except that once an error
// has been returned from the Reader, it will not recover.
type asyncReader struct {
	in      io.ReadCloser // Input reader
	ready   chan *buffer  // Buffers ready to be handed to the reader
	reuse   chan *buffer  // Buffers to reuse for input reading
	exit    chan struct{} // Closes when finished
	buffers int           // Number of buffers
	err     error         // If an error has occurred it is here
	cur     *buffer       // Current buffer being served
	exited  chan struct{} // Channel is closed when the async reader shuts down
	closed  bool          // Has the parent reader been closed?
}

// newAsyncReader returns a reader that will asynchronously read from
// the supplied Reader into a number of buffers each with a given size.
// It will start reading from the input at once, maybe even before this
// function has returned.
// The input can be read from the returned reader.
// When done use Close to release the buffers and close the supplied input.
func newAsyncReader(rd io.ReadCloser, buffers, size int) (io.ReadCloser, error) {
	if size <= 0 {
		return nil, fmt.Errorf("buffer size too small")
	}
	if buffers <= 0 {
		return nil, fmt.Errorf("number of buffers too small")
	}
	if rd == nil {
		return nil, fmt.Errorf("nil reader supplied")
	}
	a := &asyncReader{}
	a.init(rd, buffers, size)
	return a, nil
}

func (a *asyncReader) init(rd io.ReadCloser, buffers, size int) {
	a.in = rd
	a.ready = make(chan *buffer, buffers)
	a.reuse = make(chan *buffer, buffers)
	a.exit = make(chan struct{}, 0)
	a.exited = make(chan struct{}, 0)
	a.buffers = buffers
	a.cur = nil

	// Create buffers
	for i := 0; i < buffers; i++ {
		a.reuse <- newBuffer(size)
	}

	// Start async reader
	go func() {
		// Ensure that when we exit this is signalled.
		defer close(a.exited)
		for {
			select {
			case b := <-a.reuse:
				err := b.read(a.in)
				a.ready <- b
				if err != nil {
					close(a.ready)
					return
				}
			case <-a.exit:
				return
			}
		}
	}()
}

// fill fetches the next buffer to serve, recycling the current one.
func (a *asyncReader) fill() (err error) {
	if a.cur.isEmpty() {
		if a.cur != nil {
			a.reuse <- a.cur
			a.cur = nil
		}
		b, ok := <-a.ready
		if !ok {
			return a.err
		}
		a.cur = b
	}
	return nil
}

// Read will return the next available data.
func (a *asyncReader) Read(p []byte) (n int, err error) {
	// Swap buffer and maybe return error
	err = a.fill()
	if err != nil {
		return 0, err
	}

	// Copy what we can
	n = copy(p, a.cur.buffer())
	a.cur.increment(n)

	// If at end of buffer, return any error, if present
	if a.cur.isEmpty() {
		a.err = a.cur.err
		return n, a.err
	}
	return n, nil
}

// WriteTo writes data to w until there's no more data to write or an error occurs.
// The return value n is the number of bytes written.
// Any error encountered during the write is also returned.
func (a *asyncReader) WriteTo(w io.Writer) (n int64, err error) {
	n = 0
	for {
		err = a.fill()
		if err != nil {
			return n, err
		}
		n2, err := w.Write(a.cur.buffer())
		a.cur.increment(n2)
		n += int64(n2)
		if err != nil {
			return n, err
		}
		if a.cur.err != nil {
			a.err = a.cur.err
			return n, a.cur.err
		}
	}
}

// Close will ensure that the underlying async reader is shut down.
// It will also close the input supplied on newAsyncReader.
func (a *asyncReader) Close() (err error) {
	select {
	case <-a.exited:
	default:
		close(a.exit)
		<-a.exited
	}
	if !a.closed {
		a.closed = true
		return a.in.Close()
	}
	return nil
}

// Internal buffer
// If an error is present, it must be returned
// once all buffer content has been served.
type buffer struct {
	buf    []byte
	err    error
	offset int
	size   int
}

func newBuffer(size int) *buffer {
	return &buffer{buf: make([]byte, size), err: nil, size: size}
}

// isEmpty returns true if the offset is at the end of the
// buffer, or if the buffer is nil.
func (b *buffer) isEmpty() bool {
	if b == nil {
		return true
	}
	if len(b.buf)-b.offset <= 0 {
		return true
	}
	return false
}

// read into start of the buffer from the supplied reader,
// resets the offset and updates the size of the buffer.
// Any error encountered during the read is returned.
func (b *buffer) read(rd io.Reader) error {
	var n int
	n, b.err = rd.Read(b.buf[0:b.size])
	b.buf = b.buf[0:n]
	b.offset = 0
	return b.err
}

// Return the buffer at current offset
func (b *buffer) buffer() []byte {
	return b.buf[b.offset:]
}

// increment the offset
func (b *buffer) increment(n int) {
	b.offset += n
}
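The new asyncReader pipelines input: a background goroutine keeps `buffers` blocks of `size` bytes filled from the wrapped reader while the consumer drains them, so reads overlap with network or disk I/O. A sketch of driving it directly (the file name is illustrative; newAsyncReader is the unexported constructor added above):

```go
package fs

import (
	"io"
	"io/ioutil"
	"log"
	"os"
)

// exampleReadAhead reads a local file through the async reader with
// 4 read-ahead buffers of 1 MB each - a sketch, not rclone code.
func exampleReadAhead() {
	f, err := os.Open("big.bin") // illustrative file name
	if err != nil {
		log.Fatal(err)
	}
	in, err := newAsyncReader(f, 4, 1<<20)
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close() // also closes f
	// io.Copy picks up the WriteTo fast path defined above.
	if _, err := io.Copy(ioutil.Discard, in); err != nil {
		log.Fatal(err)
	}
}
```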
268
fs/buffer_test.go
Normal file
@@ -0,0 +1,268 @@
package fs

import (
	"bufio"
	"bytes"
	"io"
	"io/ioutil"
	"strings"
	"testing"
	"testing/iotest"
)

func TestAsyncReader(t *testing.T) {
	buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
	ar, err := newAsyncReader(buf, 4, 10000)
	if err != nil {
		t.Fatal("error when creating:", err)
	}

	var dst = make([]byte, 100)
	n, err := ar.Read(dst)
	if err != nil {
		t.Fatal("error when reading:", err)
	}
	if n != 10 {
		t.Fatal("unexpected length, expected 10, got ", n)
	}

	n, err = ar.Read(dst)
	if err != io.EOF {
		t.Fatal("expected io.EOF, got", err)
	}
	if n != 0 {
		t.Fatal("unexpected length, expected 0, got ", n)
	}

	// Test read after error
	n, err = ar.Read(dst)
	if err != io.EOF {
		t.Fatal("expected io.EOF, got", err)
	}
	if n != 0 {
		t.Fatal("unexpected length, expected 0, got ", n)
	}

	err = ar.Close()
	if err != nil {
		t.Fatal("error when closing:", err)
	}
	// Test double close
	err = ar.Close()
	if err != nil {
		t.Fatal("error when closing:", err)
	}

	// Test Close without reading everything
	buf = ioutil.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
	ar, err = newAsyncReader(buf, 4, 100)
	if err != nil {
		t.Fatal("error when creating:", err)
	}
	err = ar.Close()
	if err != nil {
		t.Fatal("error when closing, noread:", err)
	}
}

func TestAsyncWriteTo(t *testing.T) {
	buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
	ar, err := newAsyncReader(buf, 4, 10000)
	if err != nil {
		t.Fatal("error when creating:", err)
	}

	var dst = &bytes.Buffer{}
	n, err := io.Copy(dst, ar)
	if err != io.EOF {
		t.Fatal("error when reading:", err)
	}
	if n != 10 {
		t.Fatal("unexpected length, expected 10, got ", n)
	}

	// Should still return EOF
	n, err = io.Copy(dst, ar)
	if err != io.EOF {
		t.Fatal("expected io.EOF, got", err)
	}
	if n != 0 {
		t.Fatal("unexpected length, expected 0, got ", n)
	}

	err = ar.Close()
	if err != nil {
		t.Fatal("error when closing:", err)
	}
}

func TestAsyncReaderErrors(t *testing.T) {
	// test nil reader
	_, err := newAsyncReader(nil, 4, 10000)
	if err == nil {
		t.Fatal("expected error when creating, but got nil")
	}

	// invalid buffer number
	buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
	_, err = newAsyncReader(buf, 0, 10000)
	if err == nil {
		t.Fatal("expected error when creating, but got nil")
	}
	_, err = newAsyncReader(buf, -1, 10000)
	if err == nil {
		t.Fatal("expected error when creating, but got nil")
	}

	// invalid buffer size
	_, err = newAsyncReader(buf, 4, 0)
	if err == nil {
		t.Fatal("expected error when creating, but got nil")
	}
	_, err = newAsyncReader(buf, 4, -1)
	if err == nil {
		t.Fatal("expected error when creating, but got nil")
	}
}

// Complex read tests, leveraged from "bufio".

type readMaker struct {
	name string
	fn   func(io.Reader) io.Reader
}

var readMakers = []readMaker{
	{"full", func(r io.Reader) io.Reader { return r }},
	{"byte", iotest.OneByteReader},
	{"half", iotest.HalfReader},
	{"data+err", iotest.DataErrReader},
	{"timeout", iotest.TimeoutReader},
}

// Call Read to accumulate the text of a file
func reads(buf io.Reader, m int) string {
	var b [1000]byte
	nb := 0
	for {
		n, err := buf.Read(b[nb : nb+m])
		nb += n
		if err == io.EOF {
			break
		} else if err != nil && err != iotest.ErrTimeout {
			panic("Data: " + err.Error())
		} else if err != nil {
			break
		}
	}
	return string(b[0:nb])
}

type bufReader struct {
	name string
	fn   func(io.Reader) string
}

var bufreaders = []bufReader{
	{"1", func(b io.Reader) string { return reads(b, 1) }},
	{"2", func(b io.Reader) string { return reads(b, 2) }},
	{"3", func(b io.Reader) string { return reads(b, 3) }},
	{"4", func(b io.Reader) string { return reads(b, 4) }},
	{"5", func(b io.Reader) string { return reads(b, 5) }},
	{"7", func(b io.Reader) string { return reads(b, 7) }},
}

const minReadBufferSize = 16

var bufsizes = []int{
	0, minReadBufferSize, 23, 32, 46, 64, 93, 128, 1024, 4096,
}

// Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderSizes(t *testing.T) {
	var texts [31]string
	str := ""
	all := ""
	for i := 0; i < len(texts)-1; i++ {
		texts[i] = str + "\n"
		all += texts[i]
		str += string(i%26 + 'a')
	}
	texts[len(texts)-1] = all

	for h := 0; h < len(texts); h++ {
		text := texts[h]
		for i := 0; i < len(readMakers); i++ {
			for j := 0; j < len(bufreaders); j++ {
				for k := 0; k < len(bufsizes); k++ {
					for l := 1; l < 10; l++ {
						readmaker := readMakers[i]
						bufreader := bufreaders[j]
						bufsize := bufsizes[k]
						read := readmaker.fn(strings.NewReader(text))
						buf := bufio.NewReaderSize(read, bufsize)
						ar, _ := newAsyncReader(ioutil.NopCloser(buf), l, 100)
						s := bufreader.fn(ar)
						// "timeout" expects the Reader to recover, asyncReader does not.
						if s != text && readmaker.name != "timeout" {
							t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
								readmaker.name, bufreader.name, bufsize, text, s)
						}
						err := ar.Close()
						if err != nil {
							t.Fatal("Unexpected close error:", err)
						}
					}
				}
			}
		}
	}
}

// Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderWriteTo(t *testing.T) {
	var texts [31]string
	str := ""
	all := ""
	for i := 0; i < len(texts)-1; i++ {
		texts[i] = str + "\n"
		all += texts[i]
		str += string(i%26 + 'a')
	}
	texts[len(texts)-1] = all

	for h := 0; h < len(texts); h++ {
		text := texts[h]
		for i := 0; i < len(readMakers); i++ {
			for j := 0; j < len(bufreaders); j++ {
				for k := 0; k < len(bufsizes); k++ {
					for l := 1; l < 10; l++ {
						readmaker := readMakers[i]
						bufreader := bufreaders[j]
						bufsize := bufsizes[k]
						read := readmaker.fn(strings.NewReader(text))
						buf := bufio.NewReaderSize(read, bufsize)
						ar, _ := newAsyncReader(ioutil.NopCloser(buf), l, 100)
						dst := &bytes.Buffer{}
						wt := ar.(io.WriterTo)
						_, err := wt.WriteTo(dst)
						if err != nil && err != io.EOF && err != iotest.ErrTimeout {
							t.Fatal("Copy:", err)
						}
						s := dst.String()
						// "timeout" expects the Reader to recover, asyncReader does not.
						if s != text && readmaker.name != "timeout" {
							t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
								readmaker.name, bufreader.name, bufsize, text, s)
						}
						err = ar.Close()
						if err != nil {
							t.Fatal("Unexpected close error:", err)
						}
					}
				}
			}
		}
	}
}
47
fs/config.go
@@ -17,6 +17,8 @@ import (
	"strings"
	"time"

	"crypto/tls"

	"github.com/Unknwon/goconfig"
	"github.com/mreiferson/go-httpclient"
	"github.com/spf13/pflag"
@@ -53,6 +55,7 @@ var (
	timeout     = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
	dumpHeaders = pflag.BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
	dumpBodies  = pflag.BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
	skipVerify  = pflag.BoolP("no-check-certificate", "", false, "Do not verify the server SSL certificate. Insecure.")
	bwLimit     SizeSuffix
)

@@ -148,18 +151,20 @@ func Reveal(y string) string {

// ConfigInfo is filesystem config options
type ConfigInfo struct {
	Verbose        bool
	Quiet          bool
	DryRun         bool
	CheckSum       bool
	SizeOnly       bool
	ModifyWindow   time.Duration
	Checkers       int
	Transfers      int
	ConnectTimeout time.Duration // Connect timeout
	Timeout        time.Duration // Data channel timeout
	DumpHeaders    bool
	DumpBodies     bool
	Verbose            bool
	Quiet              bool
	DryRun             bool
	CheckSum           bool
	SizeOnly           bool
	ModifyWindow       time.Duration
	Checkers           int
	Transfers          int
	ConnectTimeout     time.Duration // Connect timeout
	Timeout            time.Duration // Data channel timeout
	DumpHeaders        bool
	DumpBodies         bool
	Filter             *Filter
	InsecureSkipVerify bool // Skip server certificate verification
}

// Transport returns an http.RoundTripper with the correct timeouts
@@ -186,6 +191,14 @@ func (ci *ConfigInfo) Transport() http.RoundTripper {
	// ReadWriteTimeout, if non-zero, will set a deadline for every Read and
	// Write operation on the request connection.
	ReadWriteTimeout: ci.Timeout,

	// InsecureSkipVerify controls whether a client verifies the
	// server's certificate chain and host name.
	// If InsecureSkipVerify is true, TLS accepts any certificate
	// presented by the server and any host name in that certificate.
	// In this mode, TLS is susceptible to man-in-the-middle attacks.
	// This should be used only for testing.
	TLSClientConfig: &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify},
	}
	if ci.DumpHeaders || ci.DumpBodies {
		return NewLoggedTransport(t, ci.DumpBodies)
@@ -238,6 +251,7 @@ func LoadConfig() {
	Config.SizeOnly = *sizeOnly
	Config.DumpHeaders = *dumpHeaders
	Config.DumpBodies = *dumpBodies
	Config.InsecureSkipVerify = *skipVerify

	ConfigPath = *configFile

@@ -252,6 +266,12 @@ func LoadConfig() {
	}
}

	// Load filters
	Config.Filter, err = NewFilter()
	if err != nil {
		log.Fatalf("Failed to load filters: %v", err)
	}

	// Start the token bucket limiter
	startTokenBucket()
}
@@ -491,11 +511,14 @@ func EditConfig() {
	for {
		fmt.Printf("name> ")
		name := ReadLine()
		parts := matcher.FindStringSubmatch(name + ":")
		switch {
		case name == "":
			fmt.Printf("Can't use empty name\n")
		case isDriveLetter(name):
			fmt.Printf("Can't use %q as it can be confused with a drive letter\n", name)
		case len(parts) != 3 || parts[2] != "":
			fmt.Printf("Can't use %q as it has invalid characters in it %v\n", name, parts)
		default:
			NewRemote(name)
			break nameLoop
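The new --no-check-certificate flag ends up as tls.Config.InsecureSkipVerify on the transport. A standalone sketch of the behaviour it enables (the URL is illustrative; testing only, as the comment in the diff warns):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// Equivalent of rclone's --no-check-certificate: accept any server
	// certificate. Open to man-in-the-middle attacks - testing only.
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://self-signed.example.com/") // illustrative URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```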
102
fs/error.go
Normal file
@@ -0,0 +1,102 @@
// Errors and error handling

package fs

import (
	"fmt"
	"net/http"
	"net/url"
)

// Retry is an optional interface for error as to whether the
// operation should be retried at a high level.
//
// This should be returned from Update or Put methods as required
type Retry interface {
	error
	Retry() bool
}

// retryError is a type of error
type retryError string

// Error interface
func (r retryError) Error() string {
	return string(r)
}

// Retry interface
func (r retryError) Retry() bool {
	return true
}

// Check interface
var _ Retry = retryError("")

// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...interface{}) error {
	return retryError(fmt.Sprintf(format, a...))
}

// PlainRetryError is an error wrapped so it will retry
type plainRetryError struct {
	error
}

// Retry interface
func (err plainRetryError) Retry() bool {
	return true
}

// Check interface
var _ Retry = plainRetryError{(error)(nil)}

// RetryError makes an error which indicates it would like to be retried
func RetryError(err error) error {
	return plainRetryError{err}
}

// ShouldRetry looks at an error and tries to work out if retrying the
// operation that caused it would be a good idea. It returns true if
// the error implements Timeout() or Temporary() and it returns true.
func ShouldRetry(err error) bool {
	if err == nil {
		return false
	}

	// Unwrap url.Error
	if urlErr, ok := err.(*url.Error); ok {
		err = urlErr.Err
	}

	// Check for net error Timeout()
	if x, ok := err.(interface {
		Timeout() bool
	}); ok && x.Timeout() {
		return true
	}

	// Check for net error Temporary()
	if x, ok := err.(interface {
		Temporary() bool
	}); ok && x.Temporary() {
		return true
	}

	return false
}

// ShouldRetryHTTP returns a boolean as to whether this resp deserves to
// be retried. It checks to see if the HTTP response code is in the slice
// retryErrorCodes.
func ShouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
	if resp == nil {
		return false
	}
	for _, e := range retryErrorCodes {
		if resp.StatusCode == e {
			return true
		}
	}
	return false
}
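A sketch of how a backend might drive ShouldRetry and ShouldRetryHTTP in a retry loop; the status-code list and the helper itself are illustrative, not part of the change:

```go
package fs

import (
	"fmt"
	"net/http"
)

// exampleRetryCodes: status codes a backend might treat as transient.
var exampleRetryCodes = []int{429, 500, 502, 503, 504}

// getWithRetry retries a GET while the error or status looks transient.
func getWithRetry(client *http.Client, url string, tries int) (*http.Response, error) {
	for i := 0; i < tries; i++ {
		resp, err := client.Get(url)
		if err != nil {
			if !ShouldRetry(err) {
				return nil, err // permanent failure - give up early
			}
			continue
		}
		if !ShouldRetryHTTP(resp, exampleRetryCodes) {
			return resp, nil
		}
		_ = resp.Body.Close() // drain the transient response and try again
	}
	return nil, fmt.Errorf("gave up after %d tries", tries)
}
```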
248
fs/filter.go
Normal file
@@ -0,0 +1,248 @@
// Control the filtering of files

package fs

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strings"

	"github.com/spf13/pflag"
)

// Global
var (
	// Flags
	deleteExcluded = pflag.BoolP("delete-excluded", "", false, "Delete files on dest excluded from sync")
	filterRule     = pflag.StringP("filter", "f", "", "Add a file-filtering rule")
	filterFrom     = pflag.StringP("filter-from", "", "", "Read filtering patterns from a file")
	excludeRule    = pflag.StringP("exclude", "", "", "Exclude files matching pattern")
	excludeFrom    = pflag.StringP("exclude-from", "", "", "Read exclude patterns from file")
	includeRule    = pflag.StringP("include", "", "", "Include files matching pattern")
	includeFrom    = pflag.StringP("include-from", "", "", "Read include patterns from file")
	filesFrom      = pflag.StringP("files-from", "", "", "Read list of source-file names from file")
	minSize        SizeSuffix
	maxSize        SizeSuffix
	dumpFilters    = pflag.BoolP("dump-filters", "", false, "Dump the filters to the output")
	//cvsExclude = pflag.BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
)

func init() {
	pflag.VarP(&minSize, "min-size", "", "Don't transfer any file smaller than this in k or suffix k|M|G")
	pflag.VarP(&maxSize, "max-size", "", "Don't transfer any file larger than this in k or suffix k|M|G")
}

// rule is one filter rule
type rule struct {
	Include bool
	Regexp  *regexp.Regexp
}

// Match returns true if rule matches path
func (r *rule) Match(path string) bool {
	return r.Regexp.MatchString(path)
}

// String the rule
func (r *rule) String() string {
	c := "-"
	if r.Include {
		c = "+"
	}
	return fmt.Sprintf("%s %s", c, r.Regexp.String())
}

// filesMap describes the map of files to transfer
type filesMap map[string]struct{}

// Filter describes any filtering in operation
type Filter struct {
	DeleteExcluded bool
	MinSize        int64
	MaxSize        int64
	rules          []rule
	files          filesMap
}

// NewFilter parses the command line options and creates a Filter object
func NewFilter() (f *Filter, err error) {
	f = &Filter{
		DeleteExcluded: *deleteExcluded,
		MinSize:        int64(minSize),
		MaxSize:        int64(maxSize),
	}
	if *includeRule != "" {
		err = f.Add(true, *includeRule)
		if err != nil {
			return nil, err
		}
		// Add implicit exclude
		err = f.Add(false, "*")
		if err != nil {
			return nil, err
		}
	}
	if *includeFrom != "" {
		err := forEachLine(*includeFrom, func(line string) error {
			return f.Add(true, line)
		})
		if err != nil {
			return nil, err
		}
		// Add implicit exclude
		err = f.Add(false, "*")
		if err != nil {
			return nil, err
		}
	}
	if *excludeRule != "" {
		err = f.Add(false, *excludeRule)
		if err != nil {
			return nil, err
		}
	}
	if *excludeFrom != "" {
		err := forEachLine(*excludeFrom, func(line string) error {
			return f.Add(false, line)
		})
		if err != nil {
			return nil, err
		}
	}
	if *filterRule != "" {
		err = f.AddRule(*filterRule)
		if err != nil {
			return nil, err
		}
	}
	if *filterFrom != "" {
		err := forEachLine(*filterFrom, f.AddRule)
		if err != nil {
			return nil, err
		}
	}
	if *filesFrom != "" {
		err := forEachLine(*filesFrom, func(line string) error {
			return f.AddFile(line)
		})
		if err != nil {
			return nil, err
		}
	}
	if *dumpFilters {
		fmt.Println("--- start filters ---")
		fmt.Println(f.DumpFilters())
		fmt.Println("--- end filters ---")
	}
	return f, nil
}

// Add adds a filter rule with include or exclude status indicated
func (f *Filter) Add(Include bool, glob string) error {
	re, err := globToRegexp(glob)
	if err != nil {
		return err
	}
	rule := rule{
		Include: Include,
		Regexp:  re,
	}
	f.rules = append(f.rules, rule)
	return nil
}

// AddRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
//	+ glob
//	- glob
//	!
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func (f *Filter) AddRule(rule string) error {
	switch {
	case rule == "!":
		f.Clear()
		return nil
	case strings.HasPrefix(rule, "- "):
		return f.Add(false, rule[2:])
	case strings.HasPrefix(rule, "+ "):
		return f.Add(true, rule[2:])
	}
	return fmt.Errorf("Malformed rule %q", rule)
}

// AddFile adds a single file to the files from list
func (f *Filter) AddFile(file string) error {
	if f.files == nil {
		f.files = make(filesMap)
	}
	file = strings.Trim(file, "/")
	f.files[file] = struct{}{}
	return nil
}

// Clear clears all the filter rules
func (f *Filter) Clear() {
	f.rules = nil
}

// Include returns whether this object should be included into the
// sync or not
func (f *Filter) Include(remote string, size int64) bool {
	// filesFrom takes precedence
	if f.files != nil {
		_, include := f.files[remote]
		return include
	}
	if f.MinSize != 0 && size < f.MinSize {
		return false
	}
	if f.MaxSize != 0 && size > f.MaxSize {
		return false
	}
	for _, rule := range f.rules {
		if rule.Match(remote) {
			return rule.Include
		}
	}
	return true
}

// forEachLine calls fn on every line in the file pointed to by path
//
// It ignores empty lines and lines starting with '#' or ';'
func forEachLine(path string, fn func(string) error) (err error) {
	in, err := os.Open(path)
	if err != nil {
		return err
	}
	defer checkClose(in, &err)
	scanner := bufio.NewScanner(in)
	for scanner.Scan() {
		line := scanner.Text()
		line = strings.TrimSpace(line)
		if len(line) == 0 || line[0] == '#' || line[0] == ';' {
			continue
		}
		err := fn(line)
		if err != nil {
			return err
		}
	}
	return scanner.Err()
}

// DumpFilters dumps the filters in textual form, 1 per line
func (f *Filter) DumpFilters() string {
	rules := []string{}
	for _, rule := range f.rules {
		rules = append(rules, rule.String())
	}
	return strings.Join(rules, "\n")
}
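Rules are tested in order with the first match winning, and anything unmatched is included, which is why the flag parsing above appends an implicit `- *` after --include rules. A sketch of assembling a Filter by hand rather than from flags:

```go
package fs

import "fmt"

// exampleFilter: include jpgs, exclude everything else - a sketch of
// the same rule order NewFilter builds from --include.
func exampleFilter() error {
	f := &Filter{}
	if err := f.AddRule("+ *.jpg"); err != nil {
		return err
	}
	if err := f.AddRule("- *"); err != nil {
		return err
	}
	fmt.Println(f.Include("dir/photo.jpg", 123)) // true - first rule matches
	fmt.Println(f.Include("notes.txt", 123))     // false - caught by "- *"
	return nil
}
```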
324
fs/filter_test.go
Normal file
@@ -0,0 +1,324 @@
package fs

import (
	"io/ioutil"
	"os"
	"strings"
	"testing"
)

func TestNewFilterDefault(t *testing.T) {
	f, err := NewFilter()
	if err != nil {
		t.Fatal(err)
	}
	if f.DeleteExcluded != false {
		t.Errorf("DeleteExcluded want false got %v", f.DeleteExcluded)
	}
	if f.MinSize != 0 {
		t.Errorf("MinSize want 0 got %v", f.MinSize)
	}
	if f.MaxSize != 0 {
		t.Errorf("MaxSize want 0 got %v", f.MaxSize)
	}
	if len(f.rules) != 0 {
		t.Errorf("rules want none got %v", f.rules)
	}
	if f.files != nil {
		t.Errorf("files want none got %v", f.files)
	}
}

// return a pointer to the string
func stringP(s string) *string {
	return &s
}

// testFile creates a temp file with the contents
func testFile(t *testing.T, contents string) *string {
	out, err := ioutil.TempFile("", "filter_test")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := out.Close()
		if err != nil {
			t.Error(err)
		}
	}()
	_, err = out.Write([]byte(contents))
	if err != nil {
		t.Fatal(err)
	}
	s := out.Name()
	return &s
}

func TestNewFilterFull(t *testing.T) {
	mins := int64(100 * 1024)
	maxs := int64(1000 * 1024)
	emptyString := ""
	isFalse := false
	isTrue := true

	// Set up the input
	deleteExcluded = &isTrue
	filterRule = stringP("- filter1")
	filterFrom = testFile(t, "#comment\n+ filter2\n- filter3\n")
	excludeRule = stringP("exclude1")
	excludeFrom = testFile(t, "#comment\nexclude2\nexclude3\n")
	includeRule = stringP("include1")
	includeFrom = testFile(t, "#comment\ninclude2\ninclude3\n")
	filesFrom = testFile(t, "#comment\nfiles1\nfiles2\n")
	minSize = SizeSuffix(mins)
	maxSize = SizeSuffix(maxs)

	rm := func(p string) {
		err := os.Remove(p)
		if err != nil {
			t.Logf("error removing %q: %v", p, err)
		}
	}
	// Reset the input
	defer func() {
		rm(*filterFrom)
		rm(*excludeFrom)
		rm(*includeFrom)
		rm(*filesFrom)
		minSize = 0
		maxSize = 0
		deleteExcluded = &isFalse
		filterRule = &emptyString
		filterFrom = &emptyString
		excludeRule = &emptyString
		excludeFrom = &emptyString
		includeRule = &emptyString
		includeFrom = &emptyString
		filesFrom = &emptyString
	}()

	f, err := NewFilter()
	if err != nil {
		t.Fatal(err)
	}
	if f.DeleteExcluded != true {
		t.Errorf("DeleteExcluded want true got %v", f.DeleteExcluded)
	}
	if f.MinSize != mins {
		t.Errorf("MinSize want %v got %v", mins, f.MinSize)
	}
	if f.MaxSize != maxs {
		t.Errorf("MaxSize want %v got %v", maxs, f.MaxSize)
	}
	got := f.DumpFilters()
	want := `+ (^|/)include1$
- (^|/)[^/]*$
+ (^|/)include2$
+ (^|/)include3$
- (^|/)[^/]*$
- (^|/)exclude1$
- (^|/)exclude2$
- (^|/)exclude3$
- (^|/)filter1$
+ (^|/)filter2$
- (^|/)filter3$`
	if got != want {
		t.Errorf("rules want %s got %s", want, got)
	}
	if len(f.files) != 2 {
		t.Errorf("files want 2 got %v", f.files)
	}
	for _, name := range []string{"files1", "files2"} {
		_, ok := f.files[name]
		if !ok {
			t.Errorf("Didn't find file %q in f.files", name)
		}
	}
}

type includeTest struct {
	in   string
	size int64
	want bool
}

func testInclude(t *testing.T, f *Filter, tests []includeTest) {
	for _, test := range tests {
		got := f.Include(test.in, test.size)
		if test.want != got {
			t.Errorf("%q,%d: want %v got %v", test.in, test.size, test.want, got)
		}
	}
}

func TestNewFilterIncludeFiles(t *testing.T) {
	f, err := NewFilter()
	if err != nil {
		t.Fatal(err)
	}
	err = f.AddFile("file1.jpg")
	if err != nil {
		t.Error(err)
	}
	err = f.AddFile("/file2.jpg")
	if err != nil {
		t.Error(err)
	}
	testInclude(t, f, []includeTest{
		{"file1.jpg", 0, true},
		{"file2.jpg", 1, true},
		{"potato/file2.jpg", 2, false},
		{"file3.jpg", 3, false},
	})
}

func TestNewFilterMinSize(t *testing.T) {
	f, err := NewFilter()
	if err != nil {
		t.Fatal(err)
	}
	f.MinSize = 100
	testInclude(t, f, []includeTest{
		{"file1.jpg", 100, true},
		{"file2.jpg", 101, true},
		{"potato/file2.jpg", 99, false},
	})
}

func TestNewFilterMaxSize(t *testing.T) {
	f, err := NewFilter()
	if err != nil {
		t.Fatal(err)
	}
	f.MaxSize = 100
	testInclude(t, f, []includeTest{
		{"file1.jpg", 100, true},
		{"file2.jpg", 101, false},
		{"potato/file2.jpg", 99, true},
	})
}

func TestNewFilterMatches(t *testing.T) {
	f, err := NewFilter()
	if err != nil {
		t.Fatal(err)
	}
	add := func(s string) {
		err := f.AddRule(s)
		if err != nil {
			t.Fatal(err)
		}
	}
	add("+ cleared")
	add("!")
	add("- file1.jpg")
	add("+ file2.png")
	add("+ *.jpg")
	add("- *.png")
	add("- /potato")
	add("+ /sausage1")
	add("+ /sausage2*")
	add("+ /sausage3**")
	add("- *")
	testInclude(t, f, []includeTest{
		{"cleared", 100, false},
		{"file1.jpg", 100, false},
		{"file2.png", 100, true},
		{"afile2.png", 100, false},
		{"file3.jpg", 101, true},
		{"file4.png", 101, false},
		{"potato", 101, false},
		{"sausage1", 101, true},
		{"sausage1/potato", 101, false},
		{"sausage2potato", 101, true},
		{"sausage2/potato", 101, false},
		{"sausage3/potato", 101, true},
		{"unicorn", 99, false},
	})
}

func TestFilterForEachLine(t *testing.T) {
	file := testFile(t, `; comment
one
# another comment


two
 # indented comment
three
four
five
  six  `)
	defer func() {
		err := os.Remove(*file)
		if err != nil {
			t.Error(err)
		}
	}()
	lines := []string{}
	err := forEachLine(*file, func(s string) error {
		lines = append(lines, s)
		return nil
	})
	if err != nil {
		t.Error(err)
	}
	got := strings.Join(lines, ",")
	want := "one,two,three,four,five,six"
	if want != got {
		t.Errorf("want %q got %q", want, got)
	}
}

func TestFilterMatchesFromDocs(t *testing.T) {
	for _, test := range []struct {
		glob     string
		included bool
		file     string
	}{
		{"file.jpg", true, "file.jpg"},
		{"file.jpg", true, "directory/file.jpg"},
		{"file.jpg", false, "afile.jpg"},
		{"file.jpg", false, "directory/afile.jpg"},
		{"/file.jpg", true, "file.jpg"},
		{"/file.jpg", false, "afile.jpg"},
		{"/file.jpg", false, "directory/file.jpg"},
		{"*.jpg", true, "file.jpg"},
		{"*.jpg", true, "directory/file.jpg"},
		{"*.jpg", false, "file.jpg/anotherfile.png"},
		{"dir/**", true, "dir/file.jpg"},
		{"dir/**", true, "dir/dir1/dir2/file.jpg"},
		{"dir/**", false, "directory/file.jpg"},
		{"dir/**", false, "adir/file.jpg"},
		{"l?ss", true, "less"},
		{"l?ss", true, "lass"},
		{"l?ss", false, "floss"},
		{"h[ae]llo", true, "hello"},
		{"h[ae]llo", true, "hallo"},
		{"h[ae]llo", false, "hullo"},
		{"{one,two}_potato", true, "one_potato"},
		{"{one,two}_potato", true, "two_potato"},
		{"{one,two}_potato", false, "three_potato"},
		{"{one,two}_potato", false, "_potato"},
		{"\\*.jpg", true, "*.jpg"},
		{"\\\\.jpg", true, "\\.jpg"},
		{"\\[one\\].jpg", true, "[one].jpg"},
	} {
		f, err := NewFilter()
		if err != nil {
			t.Fatal(err)
		}
		err = f.Add(true, test.glob)
		if err != nil {
			t.Fatal(err)
		}
		err = f.Add(false, "*")
		if err != nil {
			t.Fatal(err)
		}
		included := f.Include(test.file, 0)
		if included != test.included {
			t.Logf("%q match %q: want %v got %v", test.glob, test.file, test.included, included)
		}
	}
}
50
fs/fs.go
@@ -197,54 +197,6 @@ type DirMover interface {
	DirMove(src Fs) error
}

// Retry is optional interface for error as to whether the operation
// should be retried at a high level.
//
// This should be returned from Update or Put methods as required
type Retry interface {
	error
	Retry() bool
}

// retryError is a type of error
type retryError string

// Error interface
func (r retryError) Error() string {
	return string(r)
}

// Retry interface
func (r retryError) Retry() bool {
	return true
}

// Check interface
var _ Retry = retryError("")

// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...interface{}) error {
	return retryError(fmt.Sprintf(format, a...))
}

// PlainRetryError is an error wrapped so it will retry
type plainRetryError struct {
	error
}

// Retry interface
func (err plainRetryError) Retry() bool {
	return true
}

// Check interface
var _ Retry = plainRetryError{(error)(nil)}

// RetryError makes an error which indicates it would like to be retried
func RetryError(err error) error {
	return plainRetryError{err}
}

// ObjectsChan is a channel of Objects
type ObjectsChan chan Object

@@ -284,7 +236,7 @@ func Find(name string) (*Info, error) {
}

// Pattern to match an rclone url
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)
var matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`)

// NewFs makes a new Fs object from the path
//
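The widened matcher lets remote names contain spaces. A quick standalone illustration of the before and after behaviour, using the two patterns verbatim from the diff:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	before := regexp.MustCompile(`^([\w_-]+):(.*)$`)
	after := regexp.MustCompile(`^([\w_ -]+):(.*)$`)
	remote := "my drive:path/to/file"         // remote name with a space
	fmt.Println(before.MatchString(remote))   // false - space rejected
	fmt.Println(after.MatchString(remote))    // true - space now allowed
}
```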
117
fs/glob.go
Normal file
@@ -0,0 +1,117 @@
// rsync style glob parser

package fs

import (
	"bytes"
	"fmt"
	"regexp"
	"strings"
)

// globToRegexp converts an rsync style glob to a regexp
//
// documented in filtering.md
func globToRegexp(glob string) (*regexp.Regexp, error) {
	var re bytes.Buffer
	if strings.HasPrefix(glob, "/") {
		glob = glob[1:]
		_, _ = re.WriteRune('^')
	} else {
		_, _ = re.WriteString("(^|/)")
	}
	consecutiveStars := 0
	insertStars := func() error {
		if consecutiveStars > 0 {
			switch consecutiveStars {
			case 1:
				_, _ = re.WriteString(`[^/]*`)
			case 2:
				_, _ = re.WriteString(`.*`)
			default:
				return fmt.Errorf("too many stars in %q", glob)
			}
		}
		consecutiveStars = 0
		return nil
	}
	inBraces := false
	inBrackets := 0
	slashed := false
	for _, c := range glob {
		if slashed {
			_, _ = re.WriteRune(c)
			slashed = false
			continue
		}
		if c != '*' {
			err := insertStars()
			if err != nil {
				return nil, err
			}
		}
		if inBrackets > 0 {
			_, _ = re.WriteRune(c)
			if c == '[' {
				inBrackets++
			}
			if c == ']' {
				inBrackets--
			}
			continue
		}
		switch c {
		case '\\':
			_, _ = re.WriteRune(c)
			slashed = true
		case '*':
			consecutiveStars++
		case '?':
			_, _ = re.WriteString(`[^/]`)
		case '[':
			_, _ = re.WriteRune(c)
			inBrackets++
		case ']':
			return nil, fmt.Errorf("mismatched ']' in glob %q", glob)
		case '{':
			if inBraces {
				return nil, fmt.Errorf("can't nest '{' '}' in glob %q", glob)
			}
			inBraces = true
			_, _ = re.WriteRune('(')
		case '}':
			if !inBraces {
				return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
			}
			_, _ = re.WriteRune(')')
			inBraces = false
		case ',':
			if inBraces {
				_, _ = re.WriteRune('|')
			} else {
				_, _ = re.WriteRune(c)
			}
		case '.', '+', '(', ')', '|', '^', '$': // regexp meta characters not dealt with above
			_, _ = re.WriteRune('\\')
			_, _ = re.WriteRune(c)
		default:
			_, _ = re.WriteRune(c)
		}
	}
	err := insertStars()
	if err != nil {
		return nil, err
	}
	if inBrackets > 0 {
		return nil, fmt.Errorf("mismatched '[' and ']' in glob %q", glob)
	}
	if inBraces {
		return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
	}
	_, _ = re.WriteRune('$')
	result, err := regexp.Compile(re.String())
	if err != nil {
		return nil, fmt.Errorf("Bad glob pattern %q: %v (%q)", glob, err, re.String())
	}
	return result, nil
}
64
fs/glob_test.go
Normal file
@@ -0,0 +1,64 @@
package fs

import (
	"strings"
	"testing"
)

func TestGlobToRegexp(t *testing.T) {
	for _, test := range []struct {
		in    string
		want  string
		error string
	}{
		{``, `(^|/)$`, ``},
		{`potato`, `(^|/)potato$`, ``},
		{`potato,sausage`, `(^|/)potato,sausage$`, ``},
		{`/potato`, `^potato$`, ``},
		{`potato?sausage`, `(^|/)potato[^/]sausage$`, ``},
		{`potat[oa]`, `(^|/)potat[oa]$`, ``},
		{`potat[a-z]or`, `(^|/)potat[a-z]or$`, ``},
		{`potat[[:alpha:]]or`, `(^|/)potat[[:alpha:]]or$`, ``},
		{`'.' '+' '(' ')' '|' '^' '$'`, `(^|/)'\.' '\+' '\(' '\)' '\|' '\^' '\$'$`, ``},
		{`*.jpg`, `(^|/)[^/]*\.jpg$`, ``},
		{`a{b,c,d}e`, `(^|/)a(b|c|d)e$`, ``},
		{`potato**`, `(^|/)potato.*$`, ``},
		{`potato**sausage`, `(^|/)potato.*sausage$`, ``},
		{`*.p[lm]`, `(^|/)[^/]*\.p[lm]$`, ``},
		{`[\[\]]`, `(^|/)[\[\]]$`, ``},
		{`***potato`, `(^|/)`, `too many stars`},
		{`***`, `(^|/)`, `too many stars`},
		{`ab]c`, `(^|/)`, `mismatched ']'`},
		{`ab[c`, `(^|/)`, `mismatched '[' and ']'`},
		{`ab{{cd`, `(^|/)`, `can't nest`},
		{`ab{}}cd`, `(^|/)`, `mismatched '{' and '}'`},
		{`ab}c`, `(^|/)`, `mismatched '{' and '}'`},
		{`ab{c`, `(^|/)`, `mismatched '{' and '}'`},
		{`*.{jpg,png,gif}`, `(^|/)[^/]*\.(jpg|png|gif)$`, ``},
		{`[a--b]`, `(^|/)`, `Bad glob pattern`},
		{`a\*b`, `(^|/)a\*b$`, ``},
		{`a\\b`, `(^|/)a\\b$`, ``},
	} {
		gotRe, err := globToRegexp(test.in)
		if test.error == "" {
			if err != nil {
				t.Errorf("%q: not expecting error: %v", test.in, err)
			} else {
				got := gotRe.String()
				if test.want != got {
					t.Errorf("%q: want %q got %q", test.in, test.want, got)
				}
			}
		} else {
			if err == nil {
				t.Errorf("%q: expecting error but didn't get one", test.in)
			} else {
				got := err.Error()
				if !strings.Contains(got, test.error) {
					t.Errorf("%q: want error %q got %q", test.in, test.error, got)
				}
			}
		}
	}
}
@@ -8,7 +8,10 @@ import (
	"net/http/httputil"
)

const separator = "------------------------------------------------------------"
const (
	separatorReq  = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
	separatorResp = "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
)

// LoggedTransport is an http transport which logs the traffic
type LoggedTransport struct {
@@ -39,16 +42,16 @@ func (t *LoggedTransport) CancelRequest(req *http.Request) {

// RoundTrip implements the RoundTripper interface.
func (t *LoggedTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	buf, _ := httputil.DumpRequest(req, t.logBody)
	log.Println(separator)
	buf, _ := httputil.DumpRequestOut(req, t.logBody)
	log.Println(separatorReq)
	log.Println("HTTP REQUEST")
	log.Println(string(buf))
	log.Println(separator)
	log.Println(separatorReq)
	resp, err = t.wrapped.RoundTrip(req)
	buf, _ = httputil.DumpResponse(resp, t.logBody)
	log.Println(separator)
	log.Println(separatorResp)
	log.Println("HTTP RESPONSE")
	log.Println(string(buf))
	log.Println(separator)
	log.Println(separatorResp)
	return resp, err
}
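A sketch of wiring LoggedTransport into a plain http.Client, the same way ConfigInfo.Transport does when --dump-headers or --dump-bodies is set (assuming NewLoggedTransport keeps the signature used in config.go above):

```go
package fs

import "net/http"

// exampleLoggedClient wraps the default transport so every request and
// response is dumped to the log, bodies included - a sketch.
func exampleLoggedClient() *http.Client {
	return &http.Client{
		Transport: NewLoggedTransport(http.DefaultTransport, true),
	}
}
```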
@@ -8,6 +8,7 @@ import (
	"mime"
	"path"
	"sync"
	"sync/atomic"
	"time"
)

@@ -197,6 +198,12 @@ tryAgain:
		ErrorLog(src, "Failed to open: %s", err)
		return
	}

	// On big files add a buffer
	if src.Size() > 10<<20 {
		in0, _ = newAsyncReader(in0, 4, 4<<20)
	}

	in := NewAccount(in0, src) // account the transfer

	if doUpdate {
@@ -438,13 +445,20 @@ func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
	go func() {
		for src := range fsrc.List() {
			remote := src.Remote()
			dst, found := delFiles[remote]
			if found {
				delete(delFiles, remote)
				toBeChecked <- ObjectPair{src, dst}
			dst, dstFound := delFiles[remote]
			if !Config.Filter.Include(remote, src.Size()) {
				Debug(src, "Excluding from sync")
				if dstFound && !Config.Filter.DeleteExcluded {
					delete(delFiles, remote)
				}
			} else {
				// No need to check since doesn't exist
				toBeUploaded <- ObjectPair{src, nil}
				if dstFound {
					delete(delFiles, remote)
					toBeChecked <- ObjectPair{src, dst}
				} else {
					// No need to check since doesn't exist
					toBeUploaded <- ObjectPair{src, nil}
				}
			}
		}
		close(toBeChecked)
@@ -673,6 +687,15 @@ func Md5sum(f Fs, w io.Writer) error {
	})
}

// Count counts the objects and their sizes in the Fs
func Count(f Fs) (objects int64, size int64, err error) {
	err = ListFn(f, func(o Object) {
		atomic.AddInt64(&objects, 1)
		atomic.AddInt64(&size, o.Size())
	})
	return
}

// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(f Fs, w io.Writer) error {
	for dir := range f.ListDir() {
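Count composes the existing ListFn walker with atomic counters so concurrent checkers can bump them safely. A hedged sketch of calling it from outside the package (the remote string is illustrative, and fs.NewFs is assumed to take the usual remote:path argument):

```go
package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/fs"
)

func main() {
	f, err := fs.NewFs("remote:path") // illustrative remote
	if err != nil {
		log.Fatal(err)
	}
	objects, size, err := fs.Count(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d objects, %d bytes\n", objects, size)
}
```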
@@ -25,6 +25,7 @@ import (
	_ "github.com/ncw/rclone/dropbox"
	_ "github.com/ncw/rclone/googlecloudstorage"
	_ "github.com/ncw/rclone/local"
	_ "github.com/ncw/rclone/onedrive"
	_ "github.com/ncw/rclone/s3"
	_ "github.com/ncw/rclone/swift"
)
@@ -407,6 +408,59 @@ func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Test with exclude
func TestSyncWithExclude(t *testing.T) {
	WriteFile("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
	fs.Config.Filter.MaxSize = 80
	defer func() {
		fs.Config.Filter.MaxSize = 0
	}()
	err := fs.Sync(fremote, flocal)
	if err != nil {
		t.Fatalf("Sync failed: %v", err)
	}
	items := []fstest.Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
	}
	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Test with exclude and delete excluded
func TestSyncWithExcludeAndDeleleteExcluded(t *testing.T) {
	fs.Config.Filter.MaxSize = 40
	fs.Config.Filter.DeleteExcluded = true
	reset := func() {
		fs.Config.Filter.MaxSize = 0
		fs.Config.Filter.DeleteExcluded = false
	}
	defer reset()
	err := fs.Sync(fremote, flocal)
	if err != nil {
		t.Fatalf("Sync failed: %v", err)
	}
	items := []fstest.Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
	}
	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)

	// Tidy up
	reset()
	err = os.Remove(localName + "/enormous")
	if err != nil {
		t.Fatalf("Remove failed: %v", err)
	}
	err = fs.Sync(fremote, flocal)
	if err != nil {
		t.Fatalf("Sync failed: %v", err)
	}
	items = []fstest.Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
	}
	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Test a server side move if possible, or the backup path if not
func TestServerSideMove(t *testing.T) {
	fremoteMove, finaliseMove, err := fstest.RandomRemote(*RemoteName, *SubDir)
@@ -530,7 +584,21 @@ func TestMd5sum(t *testing.T) {
	}
}

func TestCount(t *testing.T) {
	objects, size, err := fs.Count(fremote)
	if err != nil {
		t.Fatalf("Count failed: %v", err)
	}
	if objects != 2 {
		t.Errorf("want 2 objects got %d", objects)
	}
	if size != 60 {
		t.Errorf("want size 60 got %d", size)
	}
}

func TestCheck(t *testing.T) {
	// FIXME
}

// Clean the temporary directory
@@ -9,10 +9,7 @@ TestDrive:
TestGoogleCloudStorage:
TestDropbox:
TestAmazonCloudDrive:
"

REMOTES="
TestAmazonCloudDrive:
TestOneDrive:
"

function test_remote {
@@ -1,4 +1,4 @@
package fs

// Version of rclone
const Version = "v1.21"
const Version = "v1.24"
@@ -36,8 +36,10 @@ var (
	file2 = fstest.Item{
		ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
		Path:    `hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`,
		WinPath: `hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ _ _/z.txt`,
		WinPath: `hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _/z.txt`,
	}
	dumpHeaders = flag.Bool("dump-headers", false, "Dump HTTP headers - may contain sensitive info")
	dumpBodies  = flag.Bool("dump-bodies", false, "Dump HTTP headers and bodies - may contain sensitive info")
)

func init() {
@@ -50,6 +52,8 @@ func TestInit(t *testing.T) {
	fs.LoadConfig()
	fs.Config.Verbose = false
	fs.Config.Quiet = true
	fs.Config.DumpHeaders = *dumpHeaders
	fs.Config.DumpBodies = *dumpBodies
	t.Logf("Using remote %q", RemoteName)
	if RemoteName == "" {
		RemoteName, err = fstest.LocalRemote()
@@ -279,7 +283,7 @@ func TestFsCopy(t *testing.T) {
	src := findObject(t, file1.Path)
	dst, err := remote.(fs.Copier).Copy(src, file1Copy.Path)
	if err != nil {
		t.Errorf("Copy failed: %v", err)
		t.Fatalf("Copy failed: %v (%#v)", err, err)
	}

	// check file exists in new listing
@@ -45,7 +45,6 @@ type Data struct {
	FsName      string
	UpperFsName string
	TestName    string
	ObjectName  string
	Fns         []string
}

@@ -65,7 +64,7 @@ import (
)

func init() {
	fstests.NilObject = fs.Object((*{{ .FsName }}.FsObject{{ .ObjectName }})(nil))
	fstests.NilObject = fs.Object((*{{ .FsName }}.Object)(nil))
	fstests.RemoteName = "{{ .TestName }}"
}

@@ -75,7 +74,7 @@ func init() {
`

// Generate test file piping it through gofmt
func generateTestProgram(t *template.Template, fns []string, Fsname, ObjectName string) {
func generateTestProgram(t *template.Template, fns []string, Fsname string) {
	fsname := strings.ToLower(Fsname)
	TestName := "Test" + Fsname + ":"
	outfile := "../../" + fsname + "/" + fsname + "_test.go"
@@ -89,7 +88,6 @@ func generateTestProgram(t *template.Template, fns []string, Fsname, ObjectName
		FsName:      fsname,
		UpperFsName: Fsname,
		TestName:    TestName,
		ObjectName:  ObjectName,
		Fns:         fns,
	}

@@ -126,12 +124,13 @@ func generateTestProgram(t *template.Template, fns []string, Fsname, ObjectName
func main() {
	fns := findTestFunctions()
	t := template.Must(template.New("main").Parse(testProgram))
	generateTestProgram(t, fns, "Local", "Local")
	generateTestProgram(t, fns, "Swift", "Swift")
	generateTestProgram(t, fns, "S3", "S3")
	generateTestProgram(t, fns, "Drive", "Drive")
	generateTestProgram(t, fns, "GoogleCloudStorage", "Storage")
	generateTestProgram(t, fns, "Dropbox", "Dropbox")
	generateTestProgram(t, fns, "AmazonCloudDrive", "Acd")
	generateTestProgram(t, fns, "Local")
	generateTestProgram(t, fns, "Swift")
	generateTestProgram(t, fns, "S3")
	generateTestProgram(t, fns, "Drive")
	generateTestProgram(t, fns, "GoogleCloudStorage")
	generateTestProgram(t, fns, "Dropbox")
	generateTestProgram(t, fns, "AmazonCloudDrive")
	generateTestProgram(t, fns, "OneDrive")
	log.Printf("Done")
}
@@ -66,10 +66,10 @@ func init() {
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: oauthutil.ConfigClientID,
|
||||
Help: "Google Application Client Id - leave blank to use rclone's.",
|
||||
Help: "Google Application Client Id - leave blank normally.",
|
||||
}, {
|
||||
Name: oauthutil.ConfigClientSecret,
|
||||
Help: "Google Application Client Secret - leave blank to use rclone's.",
|
||||
Help: "Google Application Client Secret - leave blank normally.",
|
||||
}, {
|
||||
Name: "project_number",
|
||||
Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
|
||||
@@ -118,8 +118,8 @@ func init() {
|
||||
})
|
||||
}
|
||||
|
||||
// FsStorage represents a remote storage server
|
||||
type FsStorage struct {
|
||||
// Fs represents a remote storage server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
svc *storage.Service // the connection to the storage server
|
||||
client *http.Client // authorized client
|
||||
@@ -130,35 +130,35 @@ type FsStorage struct {
|
||||
bucketAcl string // used when creating new buckets
|
||||
}
|
||||
|
||||
// FsObjectStorage describes a storage object
|
||||
// Object describes a storage object
|
||||
//
|
||||
// Will definitely have info but maybe not meta
|
||||
type FsObjectStorage struct {
|
||||
storage *FsStorage // what this object is part of
|
||||
remote string // The remote path
|
||||
url string // download path
|
||||
md5sum string // The MD5Sum of the object
|
||||
bytes int64 // Bytes in the object
|
||||
modTime time.Time // Modified time of the object
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
url string // download path
|
||||
md5sum string // The MD5Sum of the object
|
||||
bytes int64 // Bytes in the object
|
||||
modTime time.Time // Modified time of the object
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *FsStorage) Name() string {
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *FsStorage) Root() string {
|
||||
func (f *Fs) Root() string {
|
||||
if f.root == "" {
|
||||
return f.bucket
|
||||
}
|
||||
return f.bucket + "/" + f.root
|
||||
}
|
||||
|
||||
// String converts this FsStorage to a string
|
||||
func (f *FsStorage) String() string {
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
if f.root == "" {
|
||||
return fmt.Sprintf("Storage bucket %s", f.bucket)
|
||||
}
|
||||
@@ -180,7 +180,7 @@ func parsePath(path string) (bucket, directory string, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs contstructs an FsStorage from the path, bucket:path
|
||||
// NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
    oAuthClient, err := oauthutil.NewClient(name, storageConfig)
    if err != nil {
@@ -192,7 +192,7 @@ func NewFs(name, root string) (fs.Fs, error) {
        return nil, err
    }

    f := &FsStorage{
    f := &Fs{
        name:   name,
        bucket: bucket,
        root:   directory,
@@ -237,10 +237,10 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsStorage) newFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
    o := &FsObjectStorage{
        storage: f,
        remote:  remote,
func (f *Fs) newFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
    o := &Object{
        fs:     f,
        remote: remote,
    }
    if info != nil {
        o.setMetaData(info)
@@ -257,14 +257,14 @@ func (f *FsStorage) newFsObjectWithInfo(remote string, info *storage.Object) fs.
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsStorage) NewFsObject(remote string) fs.Object {
func (f *Fs) NewFsObject(remote string) fs.Object {
    return f.newFsObjectWithInfo(remote, nil)
}

// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
func (f *Fs) list(directories bool, fn func(string, *storage.Object)) {
    list := f.svc.Objects.List(f.bucket).Prefix(f.root).MaxResults(listChunks)
    if directories {
        list = list.Delimiter("/")
@@ -303,7 +303,7 @@ func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
}

// List walks the path returning a channel of FsObjects
func (f *FsStorage) List() fs.ObjectsChan {
func (f *Fs) List() fs.ObjectsChan {
    out := make(fs.ObjectsChan, fs.Config.Checkers)
    if f.bucket == "" {
        // Return no objects at top level list
@@ -325,7 +325,7 @@ func (f *FsStorage) List() fs.ObjectsChan {
}

// ListDir lists the buckets
func (f *FsStorage) ListDir() fs.DirChan {
func (f *Fs) ListDir() fs.DirChan {
    out := make(fs.DirChan, fs.Config.Checkers)
    if f.bucket == "" {
        // List the buckets
@@ -379,14 +379,17 @@ func (f *FsStorage) ListDir() fs.DirChan {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsStorage) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
    // Temporary FsObject under construction
    o := &FsObjectStorage{storage: f, remote: remote}
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
    // Temporary Object under construction
    o := &Object{
        fs:     f,
        remote: remote,
    }
    return o, o.Update(in, modTime, size)
}

// Mkdir creates the bucket if it doesn't exist
func (f *FsStorage) Mkdir() error {
func (f *Fs) Mkdir() error {
    _, err := f.svc.Buckets.Get(f.bucket).Do()
    if err == nil {
        // Bucket already exists
@@ -404,16 +407,19 @@ func (f *FsStorage) Mkdir() error {
    return err
}

// Rmdir deletes the bucket
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *FsStorage) Rmdir() error {
func (f *Fs) Rmdir() error {
    if f.root != "" {
        return nil
    }
    return f.svc.Buckets.Delete(f.bucket).Do()
}

// Precision returns the precision
func (f *FsStorage) Precision() time.Duration {
func (f *Fs) Precision() time.Duration {
    return time.Nanosecond
}

@@ -426,18 +432,21 @@ func (f *FsStorage) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsStorage) Copy(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*FsObjectStorage)
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debug(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }

    // Temporary FsObject under construction
    dstObj := &FsObjectStorage{storage: f, remote: remote}
    // Temporary Object under construction
    dstObj := &Object{
        fs:     f,
        remote: remote,
    }

    srcBucket := srcObj.storage.bucket
    srcObject := srcObj.storage.root + srcObj.remote
    srcBucket := srcObj.fs.bucket
    srcObject := srcObj.fs.root + srcObj.remote
    dstBucket := f.bucket
    dstObject := f.root + remote
    newObject, err := f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
@@ -452,12 +461,12 @@ func (f *FsStorage) Copy(src fs.Object, remote string) (fs.Object, error) {
// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *FsObjectStorage) Fs() fs.Fs {
    return o.storage
func (o *Object) Fs() fs.Fs {
    return o.fs
}

// Return a string version
func (o *FsObjectStorage) String() string {
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
@@ -465,22 +474,22 @@ func (o *FsObjectStorage) String() string {
}

// Remote returns the remote path
func (o *FsObjectStorage) Remote() string {
func (o *Object) Remote() string {
    return o.remote
}

// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectStorage) Md5sum() (string, error) {
func (o *Object) Md5sum() (string, error) {
    return o.md5sum, nil
}

// Size returns the size of an object in bytes
func (o *FsObjectStorage) Size() int64 {
func (o *Object) Size() int64 {
    return o.bytes
}

// setMetaData sets the fs data from a storage.Object
func (o *FsObjectStorage) setMetaData(info *storage.Object) {
func (o *Object) setMetaData(info *storage.Object) {
    o.url = info.MediaLink
    o.bytes = int64(info.Size)

@@ -515,11 +524,11 @@ func (o *FsObjectStorage) setMetaData(info *storage.Object) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *FsObjectStorage) readMetaData() (err error) {
func (o *Object) readMetaData() (err error) {
    if !o.modTime.IsZero() {
        return nil
    }
    object, err := o.storage.svc.Objects.Get(o.storage.bucket, o.storage.root+o.remote).Do()
    object, err := o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
    if err != nil {
        fs.Debug(o, "Failed to read info: %s", err)
        return err
@@ -532,7 +541,7 @@ func (o *FsObjectStorage) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectStorage) ModTime() time.Time {
func (o *Object) ModTime() time.Time {
    err := o.readMetaData()
    if err != nil {
        // fs.Log(o, "Failed to read metadata: %s", err)
@@ -549,14 +558,14 @@ func metadataFromModTime(modTime time.Time) map[string]string {
}

// SetModTime sets the modification time of the local fs object
func (o *FsObjectStorage) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) {
    // This only adds metadata so will preserve other metadata
    object := storage.Object{
        Bucket:   o.storage.bucket,
        Name:     o.storage.root + o.remote,
        Bucket:   o.fs.bucket,
        Name:     o.fs.root + o.remote,
        Metadata: metadataFromModTime(modTime),
    }
    newObject, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do()
    newObject, err := o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
    if err != nil {
        fs.Stats.Error()
        fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
@@ -565,12 +574,12 @@ func (o *FsObjectStorage) SetModTime(modTime time.Time) {
}

// Storable returns a boolean as to whether this object is storable
func (o *FsObjectStorage) Storable() bool {
func (o *Object) Storable() bool {
    return true
}

// Open an object for read
func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
func (o *Object) Open() (in io.ReadCloser, err error) {
    // This is slightly complicated by Go here insisting on
    // decoding the %2F in URLs into / which is legal in http, but
    // unfortunately not what the storage server wants.
@@ -586,7 +595,7 @@ func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
    // alter any hex-escaped characters
    googleapi.SetOpaque(req.URL)
    req.Header.Set("User-Agent", fs.UserAgent)
    res, err := o.storage.client.Do(req)
    res, err := o.fs.client.Do(req)
    if err != nil {
        return nil, err
    }
@@ -600,16 +609,16 @@ func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectStorage) Update(in io.Reader, modTime time.Time, size int64) error {
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
    object := storage.Object{
        Bucket:      o.storage.bucket,
        Name:        o.storage.root + o.remote,
        Bucket:      o.fs.bucket,
        Name:        o.fs.root + o.remote,
        ContentType: fs.MimeType(o),
        Size:        uint64(size),
        Updated:     modTime.Format(timeFormatOut), // Doesn't get set
        Metadata:    metadataFromModTime(modTime),
    }
    newObject, err := o.storage.svc.Objects.Insert(o.storage.bucket, &object).Media(in).Name(object.Name).PredefinedAcl(o.storage.objectAcl).Do()
    newObject, err := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in).Name(object.Name).PredefinedAcl(o.fs.objectAcl).Do()
    if err != nil {
        return err
    }
@@ -619,11 +628,13 @@ func (o *FsObjectStorage) Update(in io.Reader, modTime time.Time, size int64) er
}

// Remove an object
func (o *FsObjectStorage) Remove() error {
    return o.storage.svc.Objects.Delete(o.storage.bucket, o.storage.root+o.remote).Do()
func (o *Object) Remove() error {
    return o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
}

// Check the interfaces are satisfied
var _ fs.Fs = &FsStorage{}
var _ fs.Copier = &FsStorage{}
var _ fs.Object = &FsObjectStorage{}
var (
    _ fs.Fs     = &Fs{}
    _ fs.Copier = &Fs{}
    _ fs.Object = &Object{}
)

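A side note on the `var (...)` block this hunk introduces: assigning a zero value to the blank identifier is Go's standard compile-time assertion that a type satisfies an interface. A minimal self-contained sketch of the idiom (Counter and the byte counting are illustrative, not rclone code):

package main

import "io"

// Counter is a toy type which we claim implements io.Writer.
type Counter struct{ n int64 }

// Write counts the bytes it is given and never fails.
func (c *Counter) Write(p []byte) (int, error) {
    c.n += int64(len(p))
    return len(p), nil
}

// Compile-time check: if *Counter ever stops satisfying io.Writer
// this line fails to build, exactly like the var blocks in the diff.
var _ io.Writer = (*Counter)(nil)

func main() {
    var w io.Writer = &Counter{}
    _, _ = w.Write([]byte("hello"))
}
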
@@ -13,7 +13,7 @@ import (
)

func init() {
    fstests.NilObject = fs.Object((*googlecloudstorage.FsObjectStorage)(nil))
    fstests.NilObject = fs.Object((*googlecloudstorage.Object)(nil))
    fstests.RemoteName = "TestGoogleCloudStorage:"
}

BIN
graphics/rclone-50x50.png
Normal file
Binary file not shown. (new PNG icon, 5.7 KiB)
172
local/local.go
@@ -28,8 +28,8 @@ func init() {
    })
}

// FsLocal represents a local filesystem rooted at root
type FsLocal struct {
// Fs represents a local filesystem rooted at root
type Fs struct {
    name        string    // the name of the remote
    root        string    // The root directory
    precisionOk sync.Once // Whether we need to read the precision
@@ -37,9 +37,9 @@ type FsLocal struct {
    warned map[string]struct{} // whether we have warned about this string
}

// FsObjectLocal represents a local filesystem object
type FsObjectLocal struct {
    local *FsLocal // The Fs this object is part of
// Object represents a local filesystem object
type Object struct {
    fs     *Fs         // The Fs this object is part of
    remote string      // The remote path
    path   string      // The local path
    info   os.FileInfo // Interface for file info (always present)
@@ -48,11 +48,11 @@ type FsObjectLocal struct {

// ------------------------------------------------------------

// NewFs constructs an FsLocal from the path
// NewFs constructs an Fs from the path
func NewFs(name, root string) (fs.Fs, error) {
    var err error

    f := &FsLocal{
    f := &Fs{
        name:   name,
        warned: make(map[string]struct{}),
    }
@@ -72,31 +72,35 @@ func NewFs(name, root string) (fs.Fs, error) {
}

// Name of the remote (as passed into NewFs)
func (f *FsLocal) Name() string {
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *FsLocal) Root() string {
func (f *Fs) Root() string {
    return f.root
}

// String converts this FsLocal to a string
func (f *FsLocal) String() string {
// String converts this Fs to a string
func (f *Fs) String() string {
    return fmt.Sprintf("Local file system at %s", f.root)
}

// newFsObject makes a half completed FsObjectLocal
func (f *FsLocal) newFsObject(remote string) *FsObjectLocal {
// newFsObject makes a half completed Object
func (f *Fs) newFsObject(remote string) *Object {
    remote = filepath.ToSlash(remote)
    dstPath := filterPath(filepath.Join(f.root, f.cleanUtf8(remote)))
    return &FsObjectLocal{local: f, remote: remote, path: dstPath}
    return &Object{
        fs:     f,
        remote: remote,
        path:   dstPath,
    }
}

// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
func (f *Fs) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
    o := f.newFsObject(remote)
    if info != nil {
        o.info = info
@@ -113,14 +117,14 @@ func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsLocal) NewFsObject(remote string) fs.Object {
func (f *Fs) NewFsObject(remote string) fs.Object {
    return f.newFsObjectWithInfo(remote, nil)
}

// List the path returning a channel of FsObjects
//
// Ignores everything which isn't Storable, eg links etc
func (f *FsLocal) List() fs.ObjectsChan {
func (f *Fs) List() fs.ObjectsChan {
    out := make(fs.ObjectsChan, fs.Config.Checkers)
    go func() {
        err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error {
@@ -158,7 +162,7 @@ func (f *FsLocal) List() fs.ObjectsChan {
// CleanUtf8 makes string a valid UTF-8 string
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
func (f *FsLocal) cleanUtf8(name string) string {
func (f *Fs) cleanUtf8(name string) string {
    if !utf8.ValidString(name) {
        if _, ok := f.warned[name]; !ok {
            fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
@@ -167,36 +171,13 @@ func (f *FsLocal) cleanUtf8(name string) string {
        name = string([]rune(name))
    }
    if runtime.GOOS == "windows" {
        var name2 string
        if strings.HasPrefix(name, `\\?\`) {
            name2 = `\\?\`
            strings.TrimPrefix(name, `\\?\`)
        }
        if strings.HasPrefix(name, `//?/`) {
            name2 = `//?/`
            strings.TrimPrefix(name, `//?/`)
        }
        name2 += strings.Map(func(r rune) rune {
            switch r {
            case '<', '>', '"', '|', '?', '*', '&':
                return '_'
            }
            return r
        }, name)

        if name2 != name {
            if _, ok := f.warned[name]; !ok {
                fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
                f.warned[name] = struct{}{}
            }
            name = name2
        }
        name = cleanWindowsName(f, name)
    }
    return name
}

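The removed block above contained a real bug which the new cleanWindowsName helper (later in this diff) fixes: strings.TrimPrefix returns a new string, and the old code discarded that return value, so the prefix was never actually stripped. A minimal sketch of the pitfall and the fix:

package main

import (
    "fmt"
    "strings"
)

func main() {
    name := `\\?\C:\path`
    // Bug (old code): the return value is discarded, name is unchanged.
    strings.TrimPrefix(name, `\\?\`)
    fmt.Println(name) // still \\?\C:\path
    // Fix (new code): strings are immutable, so reassign the result.
    name = strings.TrimPrefix(name, `\\?\`)
    fmt.Println(name) // C:\path
}
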
// ListDir walks the path returning a channel of FsObjects
func (f *FsLocal) ListDir() fs.DirChan {
func (f *Fs) ListDir() fs.DirChan {
    out := make(fs.DirChan, fs.Config.Checkers)
    go func() {
        defer close(out)
@@ -239,7 +220,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
}

// Put the FsObject to the local filesystem
func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
    // Temporary FsObject under construction - info filled in by Update()
    o := f.newFsObject(remote)
    err := o.Update(in, modTime, size)
@@ -250,7 +231,7 @@ func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64
}

// Mkdir creates the directory if it doesn't exist
func (f *FsLocal) Mkdir() error {
func (f *Fs) Mkdir() error {
    // FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
    return os.MkdirAll(f.root, 0777)
}
@@ -258,12 +239,12 @@ func (f *FsLocal) Mkdir() error {
// Rmdir removes the directory
//
// If it isn't empty it will return an error
func (f *FsLocal) Rmdir() error {
func (f *Fs) Rmdir() error {
    return os.Remove(f.root)
}

// Precision of the file system
func (f *FsLocal) Precision() (precision time.Duration) {
func (f *Fs) Precision() (precision time.Duration) {
    f.precisionOk.Do(func() {
        f.precision = f.readPrecision()
    })
@@ -271,7 +252,7 @@ func (f *FsLocal) Precision() (precision time.Duration) {
}

// Read the precision
func (f *FsLocal) readPrecision() (precision time.Duration) {
func (f *Fs) readPrecision() (precision time.Duration) {
    // Default precision of 1s
    precision = time.Second

@@ -327,7 +308,7 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsLocal) Purge() error {
func (f *Fs) Purge() error {
    fi, err := os.Lstat(f.root)
    if err != nil {
        return err
@@ -347,8 +328,8 @@ func (f *FsLocal) Purge() error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *FsLocal) Move(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*FsObjectLocal)
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debug(src, "Can't move - not same remote type")
        return nil, fs.ErrorCantMove
@@ -397,8 +378,8 @@ func (f *FsLocal) Move(src fs.Object, remote string) (fs.Object, error) {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *FsLocal) DirMove(src fs.Fs) error {
    srcFs, ok := src.(*FsLocal)
func (f *Fs) DirMove(src fs.Fs) error {
    srcFs, ok := src.(*Fs)
    if !ok {
        fs.Debug(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
@@ -426,12 +407,12 @@ func (f *FsLocal) DirMove(src fs.Fs) error {
// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *FsObjectLocal) Fs() fs.Fs {
    return o.local
func (o *Object) Fs() fs.Fs {
    return o.fs
}

// Return a string version
func (o *FsObjectLocal) String() string {
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
@@ -439,12 +420,12 @@ func (o *FsObjectLocal) String() string {
}

// Remote returns the remote path
func (o *FsObjectLocal) Remote() string {
    return o.local.cleanUtf8(o.remote)
func (o *Object) Remote() string {
    return o.fs.cleanUtf8(o.remote)
}

// Md5sum calculates the Md5sum of a file returning a lowercase hex string
func (o *FsObjectLocal) Md5sum() (string, error) {
func (o *Object) Md5sum() (string, error) {
    if o.md5sum != "" {
        return o.md5sum, nil
    }
@@ -472,17 +453,17 @@ func (o *FsObjectLocal) Md5sum() (string, error) {
}

// Size returns the size of an object in bytes
func (o *FsObjectLocal) Size() int64 {
func (o *Object) Size() int64 {
    return o.info.Size()
}

// ModTime returns the modification time of the object
func (o *FsObjectLocal) ModTime() time.Time {
func (o *Object) ModTime() time.Time {
    return o.info.ModTime()
}

// SetModTime sets the modification time of the local fs object
func (o *FsObjectLocal) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) {
    err := os.Chtimes(o.path, modTime, modTime)
    if err != nil {
        fs.Debug(o, "Failed to set mtime on file: %s", err)
@@ -497,7 +478,7 @@ func (o *FsObjectLocal) SetModTime(modTime time.Time) {
}

// Storable returns a boolean showing if this object is storable
func (o *FsObjectLocal) Storable() bool {
func (o *Object) Storable() bool {
    mode := o.info.Mode()
    if mode&(os.ModeSymlink|os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
        fs.Debug(o, "Can't transfer non file/directory")
@@ -512,9 +493,9 @@ func (o *FsObjectLocal) Storable() bool {
// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
// object that is read
type localOpenFile struct {
    o    *FsObjectLocal // object that is open
    in   io.ReadCloser  // handle we are wrapping
    hash hash.Hash      // currently accumulating MD5
    o    *Object       // object that is open
    in   io.ReadCloser // handle we are wrapping
    hash hash.Hash     // currently accumulating MD5
}

// Read bytes from the object - see io.Reader
@@ -539,7 +520,7 @@ func (file *localOpenFile) Close() (err error) {
}

// Open an object for read
func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
func (o *Object) Open() (in io.ReadCloser, err error) {
    in, err = os.Open(o.path)
    if err != nil {
        return
@@ -554,13 +535,13 @@ func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
}

// mkdirAll makes all the directories needed to store the object
func (o *FsObjectLocal) mkdirAll() error {
func (o *Object) mkdirAll() error {
    dir, _ := getDirFile(o.path)
    return os.MkdirAll(dir, 0777)
}

// Update the object from in with modTime and size
func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) error {
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
    err := o.mkdirAll()
    if err != nil {
        return err
@@ -595,14 +576,14 @@ func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) erro
}

// Stat a FsObject into info
func (o *FsObjectLocal) lstat() error {
func (o *Object) lstat() error {
    info, err := os.Lstat(o.path)
    o.info = info
    return err
}

// Remove an object
func (o *FsObjectLocal) Remove() error {
func (o *Object) Remove() error {
    return os.Remove(o.path)
}

@@ -664,9 +645,48 @@ func uncPath(s string) string {
    return s
}

// cleanWindowsName will clean invalid Windows characters
func cleanWindowsName(f *Fs, name string) string {
    original := name
    var name2 string
    if strings.HasPrefix(name, `\\?\`) {
        name2 = `\\?\`
        name = strings.TrimPrefix(name, `\\?\`)
    }
    if strings.HasPrefix(name, `//?/`) {
        name2 = `//?/`
        name = strings.TrimPrefix(name, `//?/`)
    }
    // Colon is allowed as part of a drive name X:\
    colonAt := strings.Index(name, ":")
    if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
        // Copy to name2, which is unfiltered
        name2 += name[0 : colonAt+1]
        name = name[colonAt+1:]
    }

    name2 += strings.Map(func(r rune) rune {
        switch r {
        case '<', '>', '"', '|', '?', '*', ':':
            return '_'
        }
        return r
    }, name)

    if name2 != original && f != nil {
        if _, ok := f.warned[name]; !ok {
            fs.Debug(f, "Replacing invalid characters in %q to %q", name, name2)
            f.warned[name] = struct{}{}
        }
    }
    return name2
}

// Check the interfaces are satisfied
var _ fs.Fs = &FsLocal{}
var _ fs.Purger = &FsLocal{}
var _ fs.Mover = &FsLocal{}
var _ fs.DirMover = &FsLocal{}
var _ fs.Object = &FsObjectLocal{}
var (
    _ fs.Fs       = &Fs{}
    _ fs.Purger   = &Fs{}
    _ fs.Mover    = &Fs{}
    _ fs.DirMover = &Fs{}
    _ fs.Object   = &Object{}
)

@@ -13,7 +13,7 @@ import (
)

func init() {
    fstests.NilObject = fs.Object((*local.FsObjectLocal)(nil))
    fstests.NilObject = fs.Object((*local.Object)(nil))
    fstests.RemoteName = ""
}


@@ -50,3 +50,42 @@ func TestUncPaths(t *testing.T) {
        }
    }
}

var utf8Tests = [][2]string{
    [2]string{"ABC", "ABC"},
    [2]string{string([]byte{0x80}), "�"},
    [2]string{string([]byte{'a', 0x80, 'b'}), "a�b"},
}

func TestCleanUtf8(t *testing.T) {
    f := &Fs{}
    f.warned = make(map[string]struct{})
    for _, test := range utf8Tests {
        got := f.cleanUtf8(test[0])
        expect := test[1]
        if got != expect {
            t.Fatalf("got %q, expected %q", got, expect)
        }
    }
}

// Test Windows character replacements
var testsWindows = [][2]string{
    [2]string{`c:\temp`, `c:\temp`},
    [2]string{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
    [2]string{`//?/UNC/theserver/dir\file.txt`, `//?/UNC/theserver/dir\file.txt`},
    [2]string{"c:/temp", "c:/temp"},
    [2]string{"/temp/file.txt", "/temp/file.txt"},
    [2]string{`!\"#¤%&/()=;:*^?+-`, "!\\_#¤%&/()=;__^_+-"},
    [2]string{`<>"|?*:&\<>"|?*:&\<>"|?*:&`, "_______&\\_______&\\_______&"},
}

func TestCleanWindows(t *testing.T) {
    for _, test := range testsWindows {
        got := cleanWindowsName(nil, test[0])
        expect := test[1]
        if got != expect {
            t.Fatalf("got %q, expected %q", got, expect)
        }
    }
}

@@ -16,6 +16,7 @@ docs = [
    "about.md",
    "install.md",
    "docs.md",
    "filtering.md",
    "overview.md",
    "drive.md",
    "s3.md",
@@ -23,6 +24,7 @@ docs = [
    "dropbox.md",
    "googlecloudstorage.md",
    "amazonclouddrive.md",
    "onedrive.md",
    "local.md",
    "changelog.md",
    "bugs.md",

@@ -30,11 +30,17 @@ const (
    // prompting the user to copy the code and paste it in the application.
    TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob"

    // BindAddress is binding for local webserver when active
    bindAddress = "127.0.0.1:53682"
    // bindPort is the port that we bind the local webserver to
    bindPort = "53682"

    // bindAddress is binding for local webserver when active
    bindAddress = "127.0.0.1:" + bindPort

    // RedirectURL is redirect to local webserver when active
    RedirectURL = "http://" + bindAddress + "/"

    // RedirectPublicURL is redirect to local webserver when active with public name
    RedirectPublicURL = "http://localhost.rclone.org:" + bindPort + "/"
)

// oldToken contains an end-user's tokens.
@@ -190,17 +196,19 @@ func Config(name string, config *oauth2.Config) error {
    // Detect whether we should use internal web server
    useWebServer := false
    switch config.RedirectURL {
    case RedirectURL:
    case RedirectURL, RedirectPublicURL:
        useWebServer = true
    case TitleBarRedirectURL:
        fmt.Printf("Use auto config?\n")
        fmt.Printf(" * Say Y if not sure\n")
        fmt.Printf(" * Say N if you are working on a remote or headless machine\n")
        fmt.Printf(" * Say N if you are working on a remote or headless machine or Y didn't work\n")
        useWebServer = fs.Confirm()
        // copy the config and set to use the internal webserver
        configCopy := *config
        config = &configCopy
        config.RedirectURL = RedirectURL
        if useWebServer {
            // copy the config and set to use the internal webserver
            configCopy := *config
            config = &configCopy
            config.RedirectURL = RedirectURL
        }
    }

    // Make random state

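The copy-and-reassign in the TitleBarRedirectURL case exists because config points at a package-level oauth2.Config shared between remotes; mutating it in place would leak the webserver RedirectURL into later calls. Copying a Go struct value is a shallow copy, which is enough here since only a string field changes. A minimal sketch of the idiom (the Config type below is a stand-in, not the oauth2 one):

package main

import "fmt"

// Config stands in for oauth2.Config in this sketch.
type Config struct{ RedirectURL string }

// withLocalRedirect returns a modified copy, leaving the shared value alone.
func withLocalRedirect(c *Config) *Config {
    configCopy := *c // shallow copy of the struct value
    configCopy.RedirectURL = "http://127.0.0.1:53682/"
    return &configCopy
}

func main() {
    shared := &Config{RedirectURL: "urn:ietf:wg:oauth:2.0:oob"}
    local := withLocalRedirect(shared)
    fmt.Println(shared.RedirectURL) // unchanged
    fmt.Println(local.RedirectURL)  // points at the local webserver
}
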
141
onedrive/api/api.go
Normal file
@@ -0,0 +1,141 @@
// Package api implements the API for one drive
package api

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "net/http"

    "github.com/ncw/rclone/fs"
)

const (
    rootURL = "https://api.onedrive.com/v1.0" // root URL for requests
)

// Client contains the info to sustain the API
type Client struct {
    c *http.Client
}

// NewClient takes an oauth http.Client and makes a new api instance
func NewClient(c *http.Client) *Client {
    return &Client{
        c: c,
    }
}

// Opts contains parameters for Call, CallJSON etc
type Opts struct {
    Method        string
    Path          string
    Absolute      bool // Path is absolute
    Body          io.Reader
    NoResponse    bool // set to close Body
    ContentType   string
    ContentLength *int64
    ContentRange  string
    ExtraHeaders  map[string]string
}

// checkClose is a utility function used to check the return from
// Close in a defer statement.
func checkClose(c io.Closer, err *error) {
    cerr := c.Close()
    if *err == nil {
        *err = cerr
    }
}

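checkClose is meant to be used with defer and a pointer to a named error return, so that a failed Close surfaces instead of being silently dropped, while an earlier error keeps precedence. A minimal self-contained sketch of the intended call pattern (readAll and the path are illustrative):

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
)

// checkClose mirrors the helper above.
func checkClose(c io.Closer, err *error) {
    cerr := c.Close()
    if *err == nil {
        *err = cerr
    }
}

// readAll defers checkClose with a pointer to the named return, so a
// Close failure is reported when nothing else has already failed.
func readAll(path string) (data []byte, err error) {
    f, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer checkClose(f, &err)
    return ioutil.ReadAll(f)
}

func main() {
    data, err := readAll("/etc/hosts")
    fmt.Println(len(data), err)
}
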
// DecodeJSON decodes resp.Body into result
func DecodeJSON(resp *http.Response, result interface{}) (err error) {
    defer checkClose(resp.Body, &err)
    decoder := json.NewDecoder(resp.Body)
    return decoder.Decode(result)
}

// Call makes the call and returns the http.Response
//
// if err != nil then resp.Body will need to be closed
//
// it will return resp if at all possible, even if err is set
func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
    if opts == nil {
        return nil, fmt.Errorf("call() called with nil opts")
    }
    var url string
    if opts.Absolute {
        url = opts.Path
    } else {
        url = rootURL + opts.Path
    }
    req, err := http.NewRequest(opts.Method, url, opts.Body)
    if err != nil {
        return
    }
    if opts.ContentType != "" {
        req.Header.Add("Content-Type", opts.ContentType)
    }
    if opts.ContentLength != nil {
        req.ContentLength = *opts.ContentLength
    }
    if opts.ContentRange != "" {
        req.Header.Add("Content-Range", opts.ContentRange)
    }
    if opts.ExtraHeaders != nil {
        for k, v := range opts.ExtraHeaders {
            req.Header.Add(k, v)
        }
    }
    req.Header.Add("User-Agent", fs.UserAgent)
    resp, err = api.c.Do(req)
    if err != nil {
        return nil, err
    }
    if resp.StatusCode < 200 || resp.StatusCode > 299 {
        // Decode error response
        errResponse := new(Error)
        err = DecodeJSON(resp, &errResponse)
        if err != nil {
            return resp, err
        }
        if errResponse.ErrorInfo.Code == "" {
            errResponse.ErrorInfo.Code = resp.Status
        }
        return resp, errResponse
    }
    if opts.NoResponse {
        return resp, resp.Body.Close()
    }
    return resp, nil
}

// CallJSON runs Call and decodes the body as a JSON object into response (if not nil)
//
// If request is not nil then it will be JSON encoded as the body of the request
//
// It will return resp if at all possible, even if err is set
func (api *Client) CallJSON(opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
    // Set the body up as a JSON object if required
    if opts.Body == nil && request != nil {
        body, err := json.Marshal(request)
        if err != nil {
            return nil, err
        }
        var newOpts = *opts
        newOpts.Body = bytes.NewBuffer(body)
        newOpts.ContentType = "application/json"
        opts = &newOpts
    }
    resp, err = api.Call(opts)
    if err != nil {
        return resp, err
    }
    if opts.NoResponse {
        return resp, nil
    }
    err = DecodeJSON(resp, response)
    return resp, err
}
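
A hedged usage sketch of the client above (GET /drive is the OneDrive v1.0 "default drive" endpoint; in rclone proper the *http.Client would come from oauthutil.NewClient, and plain http.DefaultClient would just get an auth error):

package main

import (
    "fmt"
    "log"
    "net/http"

    "github.com/ncw/rclone/onedrive/api"
)

func main() {
    // Assumption: an authorized client is needed in reality.
    client := api.NewClient(http.DefaultClient)
    opts := api.Opts{
        Method: "GET",
        Path:   "/drive", // joined to rootURL because Absolute is false
    }
    var drive api.Drive
    if _, err := client.CallJSON(&opts, nil, &drive); err != nil {
        log.Fatalf("drive info failed: %v", err)
    }
    fmt.Printf("quota: used %d of %d\n", drive.Quota.Used, drive.Quota.Total)
}
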
213
onedrive/api/types.go
Normal file
@@ -0,0 +1,213 @@
// Types passed and returned to and from the API

package api

import "time"

const (
    timeFormat = `"` + time.RFC3339 + `"`
)

// Error is returned from one drive when things go wrong
type Error struct {
    ErrorInfo struct {
        Code       string `json:"code"`
        Message    string `json:"message"`
        InnerError struct {
            Code string `json:"code"`
        } `json:"innererror"`
    } `json:"error"`
}

// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
    out := e.ErrorInfo.Code
    if e.ErrorInfo.InnerError.Code != "" {
        out += ": " + e.ErrorInfo.InnerError.Code
    }
    out += ": " + e.ErrorInfo.Message
    return out
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// Identity represents an identity of an actor. For example, an actor
// can be a user, device, or application.
type Identity struct {
    DisplayName string `json:"displayName"`
    ID          string `json:"id"`
}

// IdentitySet is a keyed collection of Identity objects. It is used
// to represent a set of identities associated with various events for
// an item, such as created by or last modified by.
type IdentitySet struct {
    User        Identity `json:"user"`
    Application Identity `json:"application"`
    Device      Identity `json:"device"`
}

// Quota groups storage space quota-related information on OneDrive into a single structure.
type Quota struct {
    Total     int    `json:"total"`
    Used      int    `json:"used"`
    Remaining int    `json:"remaining"`
    Deleted   int    `json:"deleted"`
    State     string `json:"state"` // normal | nearing | critical | exceeded
}

// Drive is a representation of a drive resource
type Drive struct {
    ID        string      `json:"id"`
    DriveType string      `json:"driveType"`
    Owner     IdentitySet `json:"owner"`
    Quota     Quota       `json:"quota"`
}

// Timestamp represents date and time information for the
// OneDrive API, by using ISO 8601 and is always in UTC time.
type Timestamp time.Time

// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
    out = (*time.Time)(t).UTC().AppendFormat(out, timeFormat)
    return out, nil
}

// UnmarshalJSON turns JSON into a Timestamp
func (t *Timestamp) UnmarshalJSON(data []byte) error {
    newT, err := time.Parse(timeFormat, string(data))
    if err != nil {
        return err
    }
    *t = Timestamp(newT)
    return nil
}
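
A quick round-trip of the Timestamp wrapper, showing why timeFormat includes the surrounding quotes: the raw JSON text of a string value arrives quoted in UnmarshalJSON. A self-contained sketch mirroring the methods above:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// Timestamp mirrors the type above: RFC 3339, always UTC on the wire.
type Timestamp time.Time

const timeFormat = `"` + time.RFC3339 + `"`

func (t *Timestamp) MarshalJSON() (out []byte, err error) {
    out = (*time.Time)(t).UTC().AppendFormat(out, timeFormat)
    return out, nil
}

func (t *Timestamp) UnmarshalJSON(data []byte) error {
    newT, err := time.Parse(timeFormat, string(data))
    if err != nil {
        return err
    }
    *t = Timestamp(newT)
    return nil
}

func main() {
    in := Timestamp(time.Date(2015, 1, 29, 9, 21, 55, 0, time.UTC))
    data, _ := json.Marshal(&in)
    fmt.Println(string(data)) // "2015-01-29T09:21:55Z"
    var out Timestamp
    _ = json.Unmarshal(data, &out)
    fmt.Println(time.Time(out).Equal(time.Time(in))) // true
}
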
// ItemReference groups data needed to reference a OneDrive item
// across the service into a single structure.
type ItemReference struct {
    DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
    ID      string `json:"id"`      // Unique identifier for the item. Read/Write.
    Path    string `json:"path"`    // Path used to navigate to the item. Read/Write.
}

// FolderFacet groups folder-related data on OneDrive into a single structure
type FolderFacet struct {
    ChildCount int64 `json:"childCount"` // Number of children contained immediately within this container.
}

// HashesType groups different types of hashes into a single structure, for an item on OneDrive.
type HashesType struct {
    Sha1Hash  string `json:"sha1Hash"`  // base64 encoded SHA1 hash for the contents of the file (if available)
    Crc32Hash string `json:"crc32Hash"` // base64 encoded CRC32 value of the file (if available)
}

// FileFacet groups file-related data on OneDrive into a single structure.
type FileFacet struct {
    MimeType string     `json:"mimeType"` // The MIME type for the file. This is determined by logic on the server and might not be the value provided when the file was uploaded.
    Hashes   HashesType `json:"hashes"`   // Hashes of the file's binary content, if available.
}

// FileSystemInfoFacet contains properties that are reported by the
// device's local file system for the local version of an item. This
// facet can be used to specify the last modified date or created date
// of the item as it was on the local device.
type FileSystemInfoFacet struct {
    CreatedDateTime      Timestamp `json:"createdDateTime"`      // The UTC date and time the file was created on a client.
    LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // The UTC date and time the file was last modified on a client.
}

// DeletedFacet indicates that the item on OneDrive has been
// deleted. In this version of the API, the presence (non-null) of the
// facet value indicates that the file was deleted. A null (or
// missing) value indicates that the file is not deleted.
type DeletedFacet struct {
}

// Item represents metadata for an item in OneDrive
type Item struct {
    ID                   string               `json:"id"`                   // The unique identifier of the item within the Drive. Read-only.
    Name                 string               `json:"name"`                 // The name of the item (filename and extension). Read-write.
    ETag                 string               `json:"eTag"`                 // eTag for the entire item (metadata + content). Read-only.
    CTag                 string               `json:"cTag"`                 // An eTag for the content of the item. This eTag is not changed if only the metadata is changed. Read-only.
    CreatedBy            IdentitySet          `json:"createdBy"`            // Identity of the user, device, and application which created the item. Read-only.
    LastModifiedBy       IdentitySet          `json:"lastModifiedBy"`       // Identity of the user, device, and application which last modified the item. Read-only.
    CreatedDateTime      Timestamp            `json:"createdDateTime"`      // Date and time of item creation. Read-only.
    LastModifiedDateTime Timestamp            `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
    Size                 int64                `json:"size"`                 // Size of the item in bytes. Read-only.
    ParentReference      *ItemReference       `json:"parentReference"`      // Parent information, if the item has a parent. Read-write.
    WebURL               string               `json:"webUrl"`               // URL that displays the resource in the browser. Read-only.
    Description          string               `json:"description"`          // Provide a user-visible description of the item. Read-write.
    Folder               *FolderFacet         `json:"folder"`               // Folder metadata, if the item is a folder. Read-only.
    File                 *FileFacet           `json:"file"`                 // File metadata, if the item is a file. Read-only.
    FileSystemInfo       *FileSystemInfoFacet `json:"fileSystemInfo"`       // File system information on client. Read-write.
    // Image *ImageFacet `json:"image"` // Image metadata, if the item is an image. Read-only.
    // Photo *PhotoFacet `json:"photo"` // Photo metadata, if the item is a photo. Read-only.
    // Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only.
    // Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only.
    // Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only.
    Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
}

// ViewDeltaResponse is the response to the view delta method
type ViewDeltaResponse struct {
    Value      []Item `json:"value"`            // An array of Item objects which have been created, modified, or deleted.
    NextLink   string `json:"@odata.nextLink"`  // A URL to retrieve the next available page of changes.
    DeltaLink  string `json:"@odata.deltaLink"` // A URL returned instead of @odata.nextLink after all current changes have been returned. Used to read the next set of changes in the future.
    DeltaToken string `json:"@delta.token"`     // A token value that can be used in the query string on manually-crafted calls to view.delta. Not needed if you're using nextLink and deltaLink.
}

// ListChildrenResponse is the response to the list children method
type ListChildrenResponse struct {
    Value    []Item `json:"value"`           // An array of Item objects
    NextLink string `json:"@odata.nextLink"` // A URL to retrieve the next available page of items.
}

// CreateItemRequest is the request to create an item object
type CreateItemRequest struct {
    Name             string      `json:"name"`                   // Name of the folder to be created.
    Folder           FolderFacet `json:"folder"`                 // Empty Folder facet to indicate that folder is the type of resource to be created.
    ConflictBehavior string      `json:"@name.conflictBehavior"` // Determines what to do if an item with a matching name already exists in this folder. Accepted values are: rename, replace, and fail (the default).
}

// SetFileSystemInfo is used to Update an object's FileSystemInfo.
type SetFileSystemInfo struct {
    FileSystemInfo FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
}

// CreateUploadResponse is the response from creating an upload session
type CreateUploadResponse struct {
    UploadURL          string    `json:"uploadUrl"`          // "https://sn3302.up.1drv.com/up/fe6987415ace7X4e1eF866337",
    ExpirationDateTime Timestamp `json:"expirationDateTime"` // "2015-01-29T09:21:55.523Z",
    NextExpectedRanges []string  `json:"nextExpectedRanges"` // ["0-"]
}

// UploadFragmentResponse is the response from uploading a fragment
type UploadFragmentResponse struct {
    ExpirationDateTime Timestamp `json:"expirationDateTime"` // "2015-01-29T09:21:55.523Z",
    NextExpectedRanges []string  `json:"nextExpectedRanges"` // ["0-"]
}
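
CreateUploadResponse and UploadFragmentResponse back the chunked-upload flow: create a session, then PUT fragments with a Content-Range header until no expected ranges remain. A hedged sketch of one fragment using the Opts fields from api.go (uploadFragment is illustrative, not the PR's uploader; the session URL would come from CreateUploadResponse.UploadURL):

package main

import (
    "bytes"
    "fmt"

    "github.com/ncw/rclone/onedrive/api"
)

// uploadFragment PUTs one chunk of a session upload. offset and
// totalSize describe where the chunk sits in the whole file.
func uploadFragment(client *api.Client, uploadURL string, chunk []byte, offset, totalSize int64) error {
    length := int64(len(chunk))
    opts := api.Opts{
        Method:        "PUT",
        Path:          uploadURL,
        Absolute:      true, // session URLs are absolute, not rootURL-relative
        Body:          bytes.NewReader(chunk),
        ContentLength: &length,
        ContentRange:  fmt.Sprintf("bytes %d-%d/%d", offset, offset+length-1, totalSize),
    }
    var result api.UploadFragmentResponse
    _, err := client.CallJSON(&opts, nil, &result)
    return err
}

func main() {} // placeholder so the sketch compiles standalone
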
// CopyItemRequest is the request to copy an item object
//
// Note: The parentReference should include either an id or path but
// not both. If both are included, they need to reference the same
// item or an error will occur.
type CopyItemRequest struct {
    ParentReference ItemReference `json:"parentReference"` // Reference to the parent item the copy will be created in.
    Name            *string       `json:"name"`            // Optional The new name for the copy. If this isn't provided, the same name will be used as the original.
}

// AsyncOperationStatus provides information on the status of an asynchronous job's progress.
//
// The following API calls return AsyncOperationStatus resources:
//
// Copy Item
// Upload From URL
type AsyncOperationStatus struct {
    Operation string `json:"operation"` // The type of job being run.
    PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
    Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
}
947
onedrive/onedrive.go
Normal file
@@ -0,0 +1,947 @@
// Package onedrive provides an interface to the Microsoft One Drive
// object storage system.
package onedrive

import (
    "bytes"
    "fmt"
    "io"
    "log"
    "net/http"
    "regexp"
    "strings"
    "sync"
    "time"

    "github.com/ncw/rclone/dircache"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/oauthutil"
    "github.com/ncw/rclone/onedrive/api"
    "github.com/ncw/rclone/pacer"
    "github.com/spf13/pflag"
    "golang.org/x/oauth2"
)

const (
    rcloneClientID     = "0000000044165769"
    rcloneClientSecret = "0+be4+jYw+7018HY6P3t/Izo+pTc+Yvt8+fy8NHU094="
    minSleep           = 10 * time.Millisecond
    maxSleep           = 2 * time.Second
    decayConstant      = 2 // bigger for slower decay, exponential
)

// Globals
var (
    // Description of how to auth for this app
    oauthConfig = &oauth2.Config{
        Scopes: []string{
            "wl.signin",          // Allow single sign-on capabilities
            "wl.offline_access",  // Allow receiving a refresh token
            "onedrive.readwrite", // r/w perms to all of a user's OneDrive files
        },
        Endpoint: oauth2.Endpoint{
            AuthURL:  "https://login.live.com/oauth20_authorize.srf",
            TokenURL: "https://login.live.com/oauth20_token.srf",
        },
        ClientID:     rcloneClientID,
        ClientSecret: fs.Reveal(rcloneClientSecret),
        RedirectURL:  oauthutil.RedirectPublicURL,
    }
    chunkSize    = fs.SizeSuffix(10 * 1024 * 1024)
    uploadCutoff = fs.SizeSuffix(10 * 1024 * 1024)
)

// Register with Fs
func init() {
    fs.Register(&fs.Info{
        Name:  "onedrive",
        NewFs: NewFs,
        Config: func(name string) {
            err := oauthutil.Config(name, oauthConfig)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
        },
        Options: []fs.Option{{
            Name: oauthutil.ConfigClientID,
            Help: "Microsoft App Client Id - leave blank normally.",
        }, {
            Name: oauthutil.ConfigClientSecret,
            Help: "Microsoft App Client Secret - leave blank normally.",
        }},
    })
    pflag.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
    pflag.VarP(&uploadCutoff, "onedrive-upload-cutoff", "", "Cutoff for switching to chunked upload - must be <= 100MB")
}
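
Because chunkSize and uploadCutoff are fs.SizeSuffix values, the two flags registered above should accept suffixed sizes on the command line, e.g. `rclone sync --onedrive-chunk-size 5M /src onedrive:dst` (the exact suffix parsing lives in fs.SizeSuffix and is assumed here rather than shown; note the multiple-of-320k constraint from the flag help).
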
// Fs represents a remote one drive
type Fs struct {
    name     string             // name of this remote
    srv      *api.Client        // the connection to the one drive server
    root     string             // the path we are working on
    dirCache *dircache.DirCache // Map of directory path to directory id
    pacer    *pacer.Pacer       // pacer for API calls
}

// Object describes a one drive object
//
// Will definitely have info but maybe not meta
type Object struct {
    fs          *Fs       // what this object is part of
    remote      string    // The remote path
    hasMetaData bool      // whether info below has been set
    size        int64     // size of the object
    modTime     time.Time // modification time of the object
    id          string    // ID of the object
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
    return fmt.Sprintf("One drive root '%s'", f.root)
}

// Pattern to match a one drive path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// parsePath parses a one drive 'url'
func parsePath(path string) (root string) {
    root = strings.Trim(path, "/")
    return
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
    429, // Too Many Requests.
    500, // Internal Server Error
    502, // Bad Gateway
    503, // Service Unavailable
    504, // Gateway Timeout
    509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
    return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
}
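
Every network call in this backend runs through f.pacer.Call with this shouldRetry helper; the closure's boolean return means "back off and try again". A minimal sketch of that contract (the call helper below is illustrative, not the pacer package's implementation):

package main

import "fmt"

// call mimics pacer.Call's contract: retry while the closure returns
// true, give up and return the error when it returns false.
func call(fn func() (bool, error), tries int) error {
    var again bool
    var err error
    for i := 0; i < tries; i++ {
        again, err = fn()
        if !again {
            break
        }
        // a real pacer would sleep here with exponential backoff
    }
    return err
}

func main() {
    attempts := 0
    err := call(func() (bool, error) {
        attempts++
        if attempts < 3 {
            return true, fmt.Errorf("HTTP 503") // retryable, like retryErrorCodes
        }
        return false, nil // success
    }, 10)
    fmt.Println(attempts, err) // 3 <nil>
}
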

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
    opts := api.Opts{
        Method: "GET",
        Path:   "/drive/root:/" + replaceReservedChars(path),
    }
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(&opts, nil, &info)
        return shouldRetry(resp, err)
    })
    return info, resp, err
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
    root = parsePath(root)
    oAuthClient, err := oauthutil.NewClient(name, oauthConfig)
    if err != nil {
        log.Fatalf("Failed to configure One Drive: %v", err)
    }

    f := &Fs{
        name:  name,
        root:  root,
        srv:   api.NewClient(oAuthClient),
        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
    }

    // Get rootID
    rootInfo, _, err := f.readMetaDataForPath("")
    if err != nil || rootInfo.ID == "" {
        return nil, fmt.Errorf("Failed to get root: %v", err)
    }

    f.dirCache = dircache.New(root, rootInfo.ID, f)

    // Find the current root
    err = f.dirCache.FindRoot(false)
    if err != nil {
        // Assume it is a file
        newRoot, remote := dircache.SplitPath(root)
        newF := *f
        newF.dirCache = dircache.New(newRoot, rootInfo.ID, &newF)
        newF.root = newRoot
        // Make new Fs which is the parent
        err = newF.dirCache.FindRoot(false)
        if err != nil {
            // No root so return old f
            return f, nil
        }
        obj := newF.newObjectWithInfo(remote, nil)
        if obj == nil {
            // File doesn't exist so return old f
            return f, nil
        }
        // return a Fs Limited to this object
        return fs.NewLimited(&newF, obj), nil
    }
    return f, nil
}

// rootSlash returns root with a trailing slash if root is non-empty, otherwise the empty string
func (f *Fs) rootSlash() string {
|
||||
if f.root == "" {
|
||||
return f.root
|
||||
}
|
||||
return f.root + "/"
|
||||
}
|
||||
|
||||
// Return an Object from a path
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *api.Item) fs.Object {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if info != nil {
|
||||
// Set info
|
||||
o.setMetaData(info)
|
||||
} else {
|
||||
err := o.readMetaData() // reads info and meta, returning an error
|
||||
if err != nil {
|
||||
// logged already FsDebug("Failed to read info: %s", err)
|
||||
return nil
|
||||
}
|
||||
}
|
	return o
}

// NewFsObject returns an Object from a path
//
// May return nil if an error occurred
func (f *Fs) NewFsObject(remote string) fs.Object {
	return f.newObjectWithInfo(remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
	// fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
	parent, ok := f.dirCache.GetInv(pathID)
	if !ok {
		return "", false, fmt.Errorf("Couldn't find parent ID")
	}
	path := leaf
	if parent != "" {
		path = parent + "/" + path
	}
	if f.dirCache.FoundRoot() {
		path = f.rootSlash() + path
	}
	info, resp, err := f.readMetaDataForPath(path)
	if err != nil {
		if resp != nil && resp.StatusCode == http.StatusNotFound {
			return "", false, nil
		}
		return "", false, err
	}
	if info.Folder == nil {
		return "", false, fmt.Errorf("Found file when looking for folder")
	}
	return info.ID, true, nil
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
	// fs.Debug(f, "CreateDir(%q, %q)\n", pathID, leaf)
	var resp *http.Response
	var info *api.Item
	opts := api.Opts{
		Method: "POST",
		Path:   "/drive/items/" + pathID + "/children",
	}
	mkdir := api.CreateItemRequest{
		Name:             replaceReservedChars(leaf),
		ConflictBehavior: "fail",
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		//fmt.Printf("...Error %v\n", err)
		return "", err
	}
	//fmt.Printf("...Id %q\n", *info.Id)
	return info.ID, nil
}

// listAllFn is the user function called to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*api.Item) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	// Top parameter asks for bigger pages of data
	// https://dev.onedrive.com/odata/optional-query-parameters.htm
	opts := api.Opts{
		Method: "GET",
		Path:   "/drive/items/" + dirID + "/children?top=1000",
	}
OUTER:
	for {
		var result api.ListChildrenResponse
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(&opts, nil, &result)
			return shouldRetry(resp, err)
		})
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't list files: %v", err)
			break
		}
		if len(result.Value) == 0 {
			break
		}
		for i := range result.Value {
			item := &result.Value[i]
			isFolder := item.Folder != nil
			if isFolder {
				if filesOnly {
					continue
				}
			} else {
				if directoriesOnly {
					continue
				}
			}
			if item.Deleted != nil {
				continue
			}
			item.Name = restoreReservedChars(item.Name)
			if fn(item) {
				found = true
				break OUTER
			}
		}
		if result.NextLink == "" {
			break
		}
		opts.Path = result.NextLink
		opts.Absolute = true
	}
	return
}
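
The paging in listAll follows OneDrive's convention of returning an @odata.nextLink URL with each partial page; the loop re-issues the request against that absolute URL until the field comes back empty. A minimal, self-contained sketch of the same termination logic, assuming a OneDrive-style JSON endpoint (the URL and type names here are illustrative, not part of this commit):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// listChildrenPage carries the two fields the paging loop depends on:
// a slice of items and the absolute URL of the next page, if any.
type listChildrenPage struct {
	Value    []json.RawMessage `json:"value"`
	NextLink string            `json:"@odata.nextLink"`
}

// countChildren pages through url until @odata.nextLink comes back
// empty - the same stopping condition listAll uses above.
func countChildren(url string) (int, error) {
	n := 0
	for url != "" {
		resp, err := http.Get(url)
		if err != nil {
			return n, err
		}
		var page listChildrenPage
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return n, err
		}
		n += len(page.Value)
		url = page.NextLink // "" on the last page
	}
	return n, nil
}

func main() {
	n, err := countChildren("https://api.onedrive.com/v1.0/drive/root/children?top=1000")
	fmt.Println(n, err)
}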

// Path should be directory path either "" or "path/"
//
// List the directory using a recursive list from the root
//
// This fetches the minimum amount of stuff but does more API calls
// which makes it slow
func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
	var subError error
	// Make the API request
	var wg sync.WaitGroup
	_, err := f.listAll(dirID, false, false, func(info *api.Item) bool {
		// Recurse on directories
		if info.Folder != nil {
			wg.Add(1)
			folder := path + info.Name + "/"
			fs.Debug(f, "Reading %s", folder)
			go func() {
				defer wg.Done()
				err := f.listDirRecursive(info.ID, folder, out)
				if err != nil {
					subError = err
					fs.ErrorLog(f, "Error reading %s:%s", folder, err)
				}
			}()
		} else {
			if fs := f.newObjectWithInfo(path+info.Name, info); fs != nil {
				out <- fs
			}
		}
		return false
	})
	wg.Wait()
	fs.Debug(f, "Finished reading %s", path)
	if err != nil {
		return err
	}
	if subError != nil {
		return subError
	}
	return nil
}

// List walks the path returning a channel of Objects
func (f *Fs) List() fs.ObjectsChan {
	out := make(fs.ObjectsChan, fs.Config.Checkers)
	go func() {
		defer close(out)
		err := f.dirCache.FindRoot(false)
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't find root: %s", err)
		} else {
			err = f.listDirRecursive(f.dirCache.RootID(), "", out)
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "List failed: %s", err)
			}
		}
	}()
	return out
}

// ListDir lists the directories
func (f *Fs) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
	go func() {
		defer close(out)
		err := f.dirCache.FindRoot(false)
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't find root: %s", err)
		} else {
			_, err := f.listAll(f.dirCache.RootID(), true, false, func(item *api.Item) bool {
				dir := &fs.Dir{
					Name:  item.Name,
					Bytes: -1,
					Count: -1,
					When:  time.Time(item.LastModifiedDateTime),
				}
				if item.Folder != nil {
					dir.Count = item.Folder.ChildCount
				}
				out <- dir
				return false
			})
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "ListDir failed: %s", err)
			}
		}
	}()
	return out
}

// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
	// Create the directory for the object if it doesn't exist
	leaf, directoryID, err = f.dirCache.FindPath(remote, true)
	if err != nil {
		return nil, leaf, directoryID, err
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, leaf, directoryID, nil
}

// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	o, _, _, err := f.createObject(remote, modTime, size)
	if err != nil {
		return nil, err
	}
	return o, o.Update(in, modTime, size)
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir() error {
	return f.dirCache.FindRoot(true)
}

// deleteObject removes an object by ID
func (f *Fs) deleteObject(id string) error {
	opts := api.Opts{
		Method:     "DELETE",
		Path:       "/drive/items/" + id,
		NoResponse: true,
	}
	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(check bool) error {
	if f.root == "" {
		return fmt.Errorf("Can't purge root directory")
	}
	dc := f.dirCache
	err := dc.FindRoot(false)
	if err != nil {
		return err
	}
	rootID := dc.RootID()
	item, _, err := f.readMetaDataForPath(f.root)
	if err != nil {
		return err
	}
	if item.Folder == nil {
		return fmt.Errorf("Not a folder")
	}
	if check && item.Folder.ChildCount != 0 {
		return fmt.Errorf("Folder not empty")
	}
	err = f.deleteObject(rootID)
	if err != nil {
		return err
	}
	f.dirCache.ResetRoot()
	if err != nil {
		return err
	}
	return nil
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir() error {
	return f.purgeCheck(true)
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// waitForJob waits for the job with status in url to complete
func (f *Fs) waitForJob(location string, o *Object) error {
	deadline := time.Now().Add(fs.Config.Timeout)
	for time.Now().Before(deadline) {
		opts := api.Opts{
			Method:   "GET",
			Path:     location,
			Absolute: true,
		}
		var resp *http.Response
		var err error
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.Call(&opts)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return err
		}
		if resp.StatusCode == 202 {
			var status api.AsyncOperationStatus
			err = api.DecodeJSON(resp, &status)
			if err != nil {
				return err
			}
			if status.Status == "failed" || status.Status == "deleteFailed" {
				return fmt.Errorf("Async operation %q returned %q", status.Operation, status.Status)
			}
		} else {
			var info api.Item
			err = api.DecodeJSON(resp, &info)
			if err != nil {
				return err
			}
			o.setMetaData(&info)
			return nil
		}
		time.Sleep(1 * time.Second)
	}
	return fmt.Errorf("Async operation didn't complete after %v", fs.Config.Timeout)
}
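
waitForJob keeps GETting the status URL: a 202 response means the job is still running (with a JSON body that may report failure), while any other success response carries the final Item. A standalone sketch of that poll-until-deadline shape, with the OneDrive specifics stubbed out as a callback (the check function here is an assumption for illustration):

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil retries check once a second until it reports done or the
// deadline passes - the same loop shape as waitForJob above.
func pollUntil(timeout time.Duration, check func() (done bool, err error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		done, err := check()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(1 * time.Second)
	}
	return errors.New("async operation didn't complete in time")
}

func main() {
	tries := 0
	err := pollUntil(10*time.Second, func() (bool, error) {
		tries++
		return tries >= 3, nil // pretend the job finishes on the third poll
	})
	fmt.Println(tries, err)
}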

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debug(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	err := srcObj.readMetaData()
	if err != nil {
		return nil, err
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}

	// Copy the object
	opts := api.Opts{
		Method:       "POST",
		Path:         "/drive/items/" + srcObj.id + "/action.copy",
		ExtraHeaders: map[string]string{"Prefer": "respond-async"},
		NoResponse:   true,
	}
	replacedLeaf := replaceReservedChars(leaf)
	copy := api.CopyItemRequest{
		Name: &replacedLeaf,
		ParentReference: api.ItemReference{
			ID: directoryID,
		},
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &copy, nil)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}

	// read location header
	location := resp.Header.Get("Location")
	if location == "" {
		return nil, fmt.Errorf("Didn't receive location header in copy response")
	}

	// Wait for job to finish
	err = f.waitForJob(location, dstObj)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}
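
The copy itself is fire-and-forget: a POST to the item's action.copy endpoint with a Prefer: respond-async header, after which the interesting result is the Location header naming the status URL that waitForJob polls. A bare net/http sketch of that request shape (the base URL and item ID are placeholders, and authentication is omitted - this is an illustration of the header handling, not the rclone client):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// startAsyncCopy issues the POST that Copy above boils down to and
// returns the monitor URL from the Location header.
func startAsyncCopy(base, itemID string, body []byte) (string, error) {
	req, err := http.NewRequest("POST", base+"/drive/items/"+itemID+"/action.copy", bytes.NewReader(body))
	if err != nil {
		return "", err
	}
	req.Header.Set("Prefer", "respond-async")
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	location := resp.Header.Get("Location")
	if location == "" {
		return "", fmt.Errorf("didn't receive location header in copy response")
	}
	return location, nil
}

func main() {
	loc, err := startAsyncCopy("https://api.onedrive.com/v1.0", "item-id", []byte(`{"name":"copy.txt"}`))
	fmt.Println(loc, err)
}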

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
	return f.purgeCheck(false)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// srvPath returns a path for use in server
func (o *Object) srvPath() string {
	return replaceReservedChars(o.fs.rootSlash() + o.remote)
}

// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
	return "", nil // not supported by one drive
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	err := o.readMetaData()
	if err != nil {
		fs.Log(o, "Failed to read metadata: %s", err)
		return 0
	}
	return o.size
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) {
	o.hasMetaData = true
	o.size = info.Size
	if info.FileSystemInfo != nil {
		o.modTime = time.Time(info.FileSystemInfo.LastModifiedDateTime)
	} else {
		o.modTime = time.Time(info.LastModifiedDateTime)
	}
	o.id = info.ID
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
	if o.hasMetaData {
		return nil
	}
	// leaf, directoryID, err := o.fs.dirCache.FindPath(o.remote, false)
	// if err != nil {
	// 	return err
	// }
	info, _, err := o.fs.readMetaDataForPath(o.srvPath())
	if err != nil {
		fs.Debug(o, "Failed to read info: %s", err)
		return err
	}
	o.setMetaData(info)
	return nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
	err := o.readMetaData()
	if err != nil {
		fs.Log(o, "Failed to read metadata: %s", err)
		return time.Now()
	}
	return o.modTime
}

// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
	opts := api.Opts{
		Method: "PATCH",
		Path:   "/drive/root:/" + o.srvPath(),
	}
	update := api.SetFileSystemInfo{
		FileSystemInfo: api.FileSystemInfoFacet{
			CreatedDateTime:      api.Timestamp(modTime),
			LastModifiedDateTime: api.Timestamp(modTime),
		},
	}
	var info *api.Item
	err := o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
		return shouldRetry(resp, err)
	})
	return info, err
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
	info, err := o.setModTime(modTime)
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to update remote mtime: %v", err)
	}
	o.setMetaData(info)
}

// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open() (in io.ReadCloser, err error) {
	if o.id == "" {
		return nil, fmt.Errorf("Can't download no id")
	}
	var resp *http.Response
	opts := api.Opts{
		Method: "GET",
		Path:   "/drive/items/" + o.id + "/content",
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	return resp.Body, err
}

// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession() (response *api.CreateUploadResponse, err error) {
	opts := api.Opts{
		Method: "POST",
		Path:   "/drive/root:/" + o.srvPath() + ":/upload.createSession",
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
		return shouldRetry(resp, err)
	})
	return
}

// uploadFragment uploads a part
func (o *Object) uploadFragment(url string, start int64, totalSize int64, buf []byte) (err error) {
	bufSize := int64(len(buf))
	opts := api.Opts{
		Method:        "PUT",
		Path:          url,
		Absolute:      true,
		ContentLength: &bufSize,
		ContentRange:  fmt.Sprintf("bytes %d-%d/%d", start, start+bufSize-1, totalSize),
		Body:          bytes.NewReader(buf),
	}
	var response api.UploadFragmentResponse
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
		return shouldRetry(resp, err)
	})
	return err
}
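
The Content-Range header is the only per-fragment bookkeeping needed: byte offsets are inclusive, so a chunk of length n starting at start covers start through start+n-1 of totalSize. A quick sketch of the ranges this produces for a file split at the 320 KiB quantum (the file size below is just an example):

package main

import "fmt"

// contentRange formats the header uploadFragment sends for a chunk of
// length n starting at off within a file of totalSize bytes.
func contentRange(off, n, totalSize int64) string {
	return fmt.Sprintf("bytes %d-%d/%d", off, off+n-1, totalSize)
}

func main() {
	const total = 700 * 1024
	const chunk = 320 * 1024
	for off := int64(0); off < total; off += chunk {
		n := int64(chunk)
		if total-off < n {
			n = total - off // final fragment may be short
		}
		fmt.Println(contentRange(off, n, total))
	}
	// bytes 0-327679/716800
	// bytes 327680-655359/716800
	// bytes 655360-716799/716800
}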

// cancelUploadSession cancels an upload session
func (o *Object) cancelUploadSession(url string) (err error) {
	opts := api.Opts{
		Method:     "DELETE",
		Path:       url,
		Absolute:   true,
		NoResponse: true,
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
	return
}

// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
	if chunkSize%(320*1024) != 0 {
		return fmt.Errorf("Chunk size %d is not a multiple of 320k", chunkSize)
	}

	// Create upload session
	fs.Debug(o, "Starting multipart upload")
	session, err := o.createUploadSession()
	if err != nil {
		return err
	}
	uploadURL := session.UploadURL

	// Cancel the session if something went wrong
	defer func() {
		if err != nil {
			fs.Debug(o, "Cancelling multipart upload")
			cancelErr := o.cancelUploadSession(uploadURL)
			if cancelErr != nil {
				fs.Log(o, "Failed to cancel multipart upload: %v", cancelErr)
			}
		}
	}()

	// Upload the chunks
	remaining := size
	position := int64(0)
	buf := make([]byte, int64(chunkSize))
	for remaining > 0 {
		n := int64(chunkSize)
		if remaining < n {
			n = remaining
			buf = buf[:n]
		}
		_, err = io.ReadFull(in, buf)
		if err != nil {
			return err
		}
		fs.Debug(o, "Uploading segment %d/%d size %d", position, size, n)
		err = o.uploadFragment(uploadURL, position, size, buf)
		if err != nil {
			return err
		}
		remaining -= n
		position += n
	}

	return nil
}
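
The guard at the top of uploadMultipart enforces the rule that upload fragments must be sized in multiples of 320 KiB (320*1024 in the check above); only the final fragment may be short. A sketch of picking a valid chunk size near a target - the 320 KiB constant comes straight from the check, while the round-down policy is an assumption for illustration:

package main

import "fmt"

const fragmentQuantum = 320 * 1024 // fragments must be a multiple of 320 KiB

// roundToFragmentSize rounds target down to the nearest size the
// chunkSize check above would accept.
func roundToFragmentSize(target int64) int64 {
	if target < fragmentQuantum {
		return fragmentQuantum
	}
	return target - target%fragmentQuantum
}

func main() {
	fmt.Println(roundToFragmentSize(10 * 1024 * 1024)) // already a multiple: 10485760
	fmt.Println(roundToFragmentSize(10000000))         // rounds down to 9830400
}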

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) (err error) {
	var info *api.Item
	if size <= int64(uploadCutoff) {
		// This is for less than 100 MB of content
		var resp *http.Response
		opts := api.Opts{
			Method: "PUT",
			Path:   "/drive/root:/" + o.srvPath() + ":/content",
			Body:   in,
		}
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return err
		}
		o.setMetaData(info)
	} else {
		err = o.uploadMultipart(in, size)
		if err != nil {
			return err
		}
	}
	// Set the mod time now and read metadata
	info, err = o.setModTime(modTime)
	if err != nil {
		return err
	}
	o.setMetaData(info)
	return nil
}

// Remove an object
func (o *Object) Remove() error {
	return o.fs.deleteObject(o.id)
}

// Check the interfaces are satisfied
var (
	_ fs.Fs     = (*Fs)(nil)
	_ fs.Purger = (*Fs)(nil)
	// _ fs.Copier   = (*Fs)(nil)
	// _ fs.Mover    = (*Fs)(nil)
	// _ fs.DirMover = (*Fs)(nil)
	_ fs.Object = (*Object)(nil)
)

56
onedrive/onedrive_test.go
Normal file
@@ -0,0 +1,56 @@

// Test OneDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package onedrive_test

import (
	"testing"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
	"github.com/ncw/rclone/onedrive"
)

func init() {
	fstests.NilObject = fs.Object((*onedrive.Object)(nil))
	fstests.RemoteName = "TestOneDrive:"
}

// Generic tests for the Fs
func TestInit(t *testing.T)                  { fstests.TestInit(t) }
func TestFsString(t *testing.T)              { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T)          { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T)       { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T)               { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T)           { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T)        { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T)            { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T)            { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T)        { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T)         { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T)            { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T)           { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T)         { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T)       { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T)                { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T)                { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T)             { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T)           { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T)           { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T)          { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T)              { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T)          { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T)          { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T)         { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T)      { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T)            { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T)            { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T)          { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T)        { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T)             { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T)     { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T)          { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T)           { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T)              { fstests.TestFinalise(t) }
91
onedrive/replace.go
Normal file
@@ -0,0 +1,91 @@

/*
Translate file names for one drive

OneDrive reserved characters

The following characters are OneDrive reserved characters, and can't
be used in OneDrive folder and file names.

	onedrive-reserved = "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|"
	onedrive-business-reserved
	    = "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|" / "#" / "%"

Note: Folder names can't end with a period (.).

Note: OneDrive for Business file or folder names cannot begin with a
tilde ('~').
*/

package onedrive

import (
	"regexp"
	"strings"
)

// charMap holds replacements for characters
//
// Onedrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
	charMap = map[rune]rune{
		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
		'*':  '＊', // FULLWIDTH ASTERISK
		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
		'?':  '？', // FULLWIDTH QUESTION MARK
		':':  '：', // FULLWIDTH COLON
		'|':  '｜', // FULLWIDTH VERTICAL LINE
		'#':  '＃', // FULLWIDTH NUMBER SIGN
		'%':  '％', // FULLWIDTH PERCENT SIGN
		'"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
		'.':  '．', // FULLWIDTH FULL STOP
		'~':  '～', // FULLWIDTH TILDE
		' ':  '␠', // SYMBOL FOR SPACE
	}
	invCharMap           map[rune]rune
	fixEndingInPeriod    = regexp.MustCompile(`\.(/|$)`)
	fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)

func init() {
	// Create inverse charMap
	invCharMap = make(map[rune]rune, len(charMap))
	for k, v := range charMap {
		invCharMap[v] = k
	}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
	// Folder names can't end with a period '.'
	in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
	// OneDrive for Business file or folder names cannot begin with a tilde '~'
	in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
	// Apparently file names can't start with space either
	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
	// Replace reserved characters
	return strings.Map(func(c rune) rune {
		if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
			return replacement
		}
		return c
	}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
	return strings.Map(func(c rune) rune {
		if replacement, ok := invCharMap[c]; ok {
			return replacement
		}
		return c
	}, in)
}
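
Concretely, a name like backup:2015/report?.txt is stored as backup：2015/report？.txt and maps back unchanged. A trimmed-down sketch of the strings.Map round trip using two of the entries from charMap above:

package main

import (
	"fmt"
	"strings"
)

// Two entries from charMap - enough to demonstrate the round trip.
var demoMap = map[rune]rune{
	':': '：', // FULLWIDTH COLON
	'?': '？', // FULLWIDTH QUESTION MARK
}

var demoInv = map[rune]rune{'：': ':', '？': '?'}

// mapWith applies m to every rune of in, leaving unmapped runes alone,
// just as replaceReservedChars and restoreReservedChars do.
func mapWith(m map[rune]rune, in string) string {
	return strings.Map(func(c rune) rune {
		if r, ok := m[c]; ok {
			return r
		}
		return c
	}, in)
}

func main() {
	replaced := mapWith(demoMap, "backup:2015/report?.txt")
	fmt.Println(replaced)                   // backup：2015/report？.txt
	fmt.Println(mapWith(demoInv, replaced)) // backup:2015/report?.txt
}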
30
onedrive/replace_test.go
Normal file
@@ -0,0 +1,30 @@

package onedrive

import "testing"

func TestReplace(t *testing.T) {
	for _, test := range []struct {
		in  string
		out string
	}{
		{"", ""},
		{"abc 123", "abc 123"},
		{`\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂.~`},
		{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂.~/＼＊＜＞？：｜＃％＂.~`},
		{" leading space", "␠leading space"},
		{"~leading tilde", "～leading tilde"},
		{"trailing dot.", "trailing dot．"},
		{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
		{"~leading tilde/~leading tilde/~leading tilde", "～leading tilde/～leading tilde/～leading tilde"},
		{"trailing dot./trailing dot./trailing dot.", "trailing dot．/trailing dot．/trailing dot．"},
	} {
		got := replaceReservedChars(test.in)
		if got != test.out {
			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
		}
		got2 := restoreReservedChars(got)
		if got2 != test.in {
			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
		}
	}
}
@@ -111,23 +111,37 @@ func emptyTokens(p *Pacer) {
	}
}

// waitForPace waits for duration for the pace to arrive
// returns the time that it arrived or a zero time
func waitForPace(p *Pacer, duration time.Duration) (when time.Time) {
	select {
	case <-time.After(duration):
		return
	case <-p.pacer:
		return time.Now()
	}
}

func TestBeginCall(t *testing.T) {
	p := New().SetMaxConnections(10).SetMinSleep(1 * time.Millisecond)
	emptyTokens(p)
	go p.beginCall()
	time.Sleep(2 * p.minSleep)
	if len(p.pacer) != 0 {
	if !waitForPace(p, 10*time.Millisecond).IsZero() {
		t.Errorf("beginSleep fired too early #1")
	}
	startTime := time.Now()
	p.pacer <- struct{}{}
	time.Sleep(2 * p.minSleep)
	if len(p.pacer) != 0 {
		t.Errorf("beginSleep fired too early #2")
	}
	time.Sleep(1 * time.Millisecond)
	connTime := time.Now()
	p.connTokens <- struct{}{}
	time.Sleep(2 * p.minSleep)
	if len(p.pacer) == 0 {
	time.Sleep(1 * time.Millisecond)
	paceTime := waitForPace(p, 10*time.Millisecond)
	if paceTime.IsZero() {
		t.Errorf("beginSleep didn't fire")
	} else if paceTime.Sub(startTime) < 0 {
		t.Errorf("pace arrived before returning pace token")
	} else if paceTime.Sub(connTime) < 0 {
		t.Errorf("pace arrived before sending conn token")
	}

@@ -135,14 +149,17 @@ func TestBeginCallZeroConnections(t *testing.T) {
	p := New().SetMaxConnections(0).SetMinSleep(1 * time.Millisecond)
	emptyTokens(p)
	go p.beginCall()
	time.Sleep(2 * p.minSleep)
	if len(p.pacer) != 0 {
	if !waitForPace(p, 10*time.Millisecond).IsZero() {
		t.Errorf("beginSleep fired too early #1")
	}
	startTime := time.Now()
	p.pacer <- struct{}{}
	time.Sleep(2 * p.minSleep)
	if len(p.pacer) == 0 {
	time.Sleep(1 * time.Millisecond)
	paceTime := waitForPace(p, 10*time.Millisecond)
	if paceTime.IsZero() {
		t.Errorf("beginSleep didn't fire")
	} else if paceTime.Sub(startTime) < 0 {
		t.Errorf("pace arrived before returning pace token")
	}
}
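
The change here swaps fixed time.Sleep checks for waitForPace, which turns "did the pacer fire?" into a select between the pacer channel and a timeout, so the tests assert ordering instead of racing a sleep. The pattern in isolation, as a runnable sketch:

package main

import (
	"fmt"
	"time"
)

// waitFor mirrors waitForPace: block until ch fires or duration
// elapses, returning the arrival time, or the zero time on timeout.
func waitFor(ch <-chan struct{}, duration time.Duration) time.Time {
	select {
	case <-time.After(duration):
		return time.Time{}
	case <-ch:
		return time.Now()
	}
}

func main() {
	ch := make(chan struct{}, 1)
	fmt.Println(waitFor(ch, 10*time.Millisecond).IsZero()) // true - nothing sent
	ch <- struct{}{}
	fmt.Println(waitFor(ch, 10*time.Millisecond).IsZero()) // false - token was waiting
}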
27
rclone.go
@@ -21,6 +21,7 @@ import (
	_ "github.com/ncw/rclone/dropbox"
	_ "github.com/ncw/rclone/googlecloudstorage"
	_ "github.com/ncw/rclone/local"
	_ "github.com/ncw/rclone/onedrive"
	_ "github.com/ncw/rclone/s3"
	_ "github.com/ncw/rclone/swift"
)
@@ -109,7 +110,7 @@ var Commands = []Command{
	},
	{
		Name:     "ls",
		ArgsHelp: "[remote:path]",
		ArgsHelp: "remote:path",
		Help: `
        List all the objects in the path with size and path.`,
		Run: func(fdst, fsrc fs.Fs) error {
@@ -120,7 +121,7 @@ var Commands = []Command{
	},
	{
		Name:     "lsd",
		ArgsHelp: "[remote:path]",
		ArgsHelp: "remote:path",
		Help: `
        List all directories/containers/buckets in the path.`,
		Run: func(fdst, fsrc fs.Fs) error {
@@ -131,7 +132,7 @@ var Commands = []Command{
	},
	{
		Name:     "lsl",
		ArgsHelp: "[remote:path]",
		ArgsHelp: "remote:path",
		Help: `
        List all the objects in the path with modification time,
        size and path.`,
@@ -143,7 +144,7 @@ var Commands = []Command{
	},
	{
		Name:     "md5sum",
		ArgsHelp: "[remote:path]",
		ArgsHelp: "remote:path",
		Help: `
        Produces an md5sum file for all the objects in the path. This
        is in the same format as the standard md5sum tool produces.`,
@@ -153,6 +154,24 @@ var Commands = []Command{
		MinArgs: 1,
		MaxArgs: 1,
	},
	{
		Name:     "size",
		ArgsHelp: "remote:path",
		Help: `
        Returns the total size of objects in remote:path and the number
        of objects.`,
		Run: func(fdst, fsrc fs.Fs) error {
			objects, size, err := fs.Count(fdst)
			if err != nil {
				return err
			}
			fmt.Printf("Total objects: %d\n", objects)
			fmt.Printf("Total size: %v (%d bytes)\n", fs.SizeSuffix(size), size)
			return nil
		},
		MinArgs: 1,
		MaxArgs: 1,
	},
	{
		Name:     "mkdir",
		ArgsHelp: "remote:path",
189
s3/s3.go
@@ -27,6 +27,7 @@ import (
	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/ncw/rclone/fs"
@@ -41,10 +42,10 @@ func init() {
	// AWS endpoints: http://docs.amazonwebservices.com/general/latest/gr/rande.html#s3_region
	Options: []fs.Option{{
		Name: "access_key_id",
		Help: "AWS Access Key ID.",
		Help: "AWS Access Key ID - leave blank for anonymous access.",
	}, {
		Name: "secret_access_key",
		Help: "AWS Secret Access Key (password). ",
		Help: "AWS Secret Access Key (password) - leave blank for anonymous access.",
	}, {
		Name: "region",
		Help: "Region to connect to.",
@@ -127,23 +128,24 @@ const (
	maxRetries = 10 // number of retries to make of operations
)

// FsS3 represents a remote s3 server
type FsS3 struct {
	name               string // the name of the remote
	c                  *s3.S3 // the connection to the s3 server
	bucket             string // the bucket we are working on
	perm               string // permissions for new buckets / objects
	root               string // root of the bucket - ignore all objects above this
	locationConstraint string // location constraint of new buckets
// Fs represents a remote s3 server
type Fs struct {
	name               string           // the name of the remote
	c                  *s3.S3           // the connection to the s3 server
	ses                *session.Session // the s3 session
	bucket             string           // the bucket we are working on
	perm               string           // permissions for new buckets / objects
	root               string           // root of the bucket - ignore all objects above this
	locationConstraint string           // location constraint of new buckets
}

// FsObjectS3 describes a s3 object
type FsObjectS3 struct {
// Object describes a s3 object
type Object struct {
	// Will definitely have everything but meta which may be nil
	//
	// List will read everything but meta - to fill that in need to call
	// readMetaData
	s3     *FsS3  // what this object is part of
	fs     *Fs    // what this object is part of
	remote string // The remote path
	etag   string // md5sum of the object
	bytes  int64  // size of the object
@@ -154,20 +156,20 @@ type FsObjectS3 struct {
// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *FsS3) Name() string {
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *FsS3) Root() string {
func (f *Fs) Root() string {
	if f.root == "" {
		return f.bucket
	}
	return f.bucket + "/" + f.root
}

// String converts this FsS3 to a string
func (f *FsS3) String() string {
// String converts this Fs to a string
func (f *Fs) String() string {
	if f.root == "" {
		return fmt.Sprintf("S3 bucket %s", f.bucket)
	}
@@ -190,17 +192,22 @@ func s3ParsePath(path string) (bucket, directory string, err error) {
}

// s3Connection makes a connection to s3
func s3Connection(name string) (*s3.S3, error) {
func s3Connection(name string) (*s3.S3, *session.Session, error) {
	// Make the auth
	accessKeyID := fs.ConfigFile.MustValue(name, "access_key_id")
	if accessKeyID == "" {
		return nil, errors.New("access_key_id not found")
	}
	secretAccessKey := fs.ConfigFile.MustValue(name, "secret_access_key")
	if secretAccessKey == "" {
		return nil, errors.New("secret_access_key not found")
	var auth *credentials.Credentials
	switch {
	case accessKeyID == "" && secretAccessKey == "":
		fs.Debug(name, "Using anonymous access for S3")
		auth = credentials.AnonymousCredentials
	case accessKeyID == "":
		return nil, nil, errors.New("access_key_id not found")
	case secretAccessKey == "":
		return nil, nil, errors.New("secret_access_key not found")
	default:
		auth = credentials.NewStaticCredentials(accessKeyID, secretAccessKey, "")
	}
	auth := credentials.NewStaticCredentials(accessKeyID, secretAccessKey, "")

	endpoint := fs.ConfigFile.MustValue(name, "endpoint")
	region := fs.ConfigFile.MustValue(name, "region")
@@ -218,12 +225,13 @@ func s3Connection(name string) (*s3.S3, error) {
		WithHTTPClient(fs.Config.Client()).
		WithS3ForcePathStyle(true)
	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
	c := s3.New(awsConfig)
	ses := session.New()
	c := s3.New(ses, awsConfig)
	if region == "other-v2-signature" {
		fs.Debug(name, "Using v2 auth")
		signer := func(req *request.Request) {
			// Ignore AnonymousCredentials object
			if req.Service.Config.Credentials == credentials.AnonymousCredentials {
			if req.Config.Credentials == credentials.AnonymousCredentials {
				return
			}
			sign(accessKeyID, secretAccessKey, req.HTTPRequest)
@@ -236,23 +244,24 @@ func s3Connection(name string) (*s3.S3, error) {
	c.Handlers.Build.PushBack(func(r *request.Request) {
		r.HTTPRequest.Header.Set("User-Agent", fs.UserAgent)
	})
	return c, nil
	return c, ses, nil
}
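
The rewritten auth block makes the two config keys act as a pair: both empty selects the SDK's anonymous credentials, exactly one set is a config error, and both set gives static credentials. A sketch of just that decision, against the same aws-sdk-go credentials package used above:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// pickCredentials mirrors the switch in s3Connection: both keys empty
// means anonymous access, one key on its own is an error.
func pickCredentials(accessKeyID, secretAccessKey string) (*credentials.Credentials, error) {
	switch {
	case accessKeyID == "" && secretAccessKey == "":
		return credentials.AnonymousCredentials, nil
	case accessKeyID == "":
		return nil, errors.New("access_key_id not found")
	case secretAccessKey == "":
		return nil, errors.New("secret_access_key not found")
	}
	return credentials.NewStaticCredentials(accessKeyID, secretAccessKey, ""), nil
}

func main() {
	c, err := pickCredentials("", "")
	fmt.Println(c == credentials.AnonymousCredentials, err) // true <nil>
}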

// NewFs constructs an FsS3 from the path, bucket:path
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
	bucket, directory, err := s3ParsePath(root)
	if err != nil {
		return nil, err
	}
	c, err := s3Connection(name)
	c, ses, err := s3Connection(name)
	if err != nil {
		return nil, err
	}
	f := &FsS3{
	f := &Fs{
		name:   name,
		c:      c,
		bucket: bucket,
		ses:    ses,
		// FIXME perm: s3.Private, // FIXME need user to specify
		root:               directory,
		locationConstraint: fs.ConfigFile.MustValue(name, "location_constraint"),
@@ -285,9 +294,9 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
	o := &FsObjectS3{
		s3:     f,
func (f *Fs) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
@@ -313,14 +322,14 @@ func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) NewFsObject(remote string) fs.Object {
func (f *Fs) NewFsObject(remote string) fs.Object {
	return f.newFsObjectWithInfo(remote, nil)
}

// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsS3) list(directories bool, fn func(string, *s3.Object)) {
func (f *Fs) list(directories bool, fn func(string, *s3.Object)) {
	maxKeys := int64(listChunkSize)
	delimiter := ""
	if directories {
@@ -385,7 +394,7 @@ func (f *FsS3) list(directories bool, fn func(string, *s3.Object)) {
}

// List walks the path returning a channel of FsObjects
func (f *FsS3) List() fs.ObjectsChan {
func (f *Fs) List() fs.ObjectsChan {
	out := make(fs.ObjectsChan, fs.Config.Checkers)
	if f.bucket == "" {
		// Return no objects at top level list
@@ -406,7 +415,7 @@ func (f *FsS3) List() fs.ObjectsChan {
}

// ListDir lists the buckets
func (f *FsS3) ListDir() fs.DirChan {
func (f *Fs) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
	if f.bucket == "" {
		// List the buckets
@@ -449,14 +458,17 @@ func (f *FsS3) ListDir() fs.DirChan {
}

// Put the FsObject into the bucket
func (f *FsS3) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	// Temporary FsObject under construction
	fs := &FsObjectS3{s3: f, remote: remote}
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	// Temporary Object under construction
	fs := &Object{
		fs:     f,
		remote: remote,
	}
	return fs, fs.Update(in, modTime, size)
}

// Mkdir creates the bucket if it doesn't exist
func (f *FsS3) Mkdir() error {
func (f *Fs) Mkdir() error {
	req := s3.CreateBucketInput{
		Bucket: &f.bucket,
		ACL:    &f.perm,
@@ -475,10 +487,13 @@ func (f *FsS3) Mkdir() error {
	return err
}

// Rmdir deletes the bucket
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *FsS3) Rmdir() error {
func (f *Fs) Rmdir() error {
	if f.root != "" {
		return nil
	}
	req := s3.DeleteBucketInput{
		Bucket: &f.bucket,
	}
@@ -487,7 +502,7 @@ func (f *FsS3) Rmdir() error {
}

// Precision of the remote
func (f *FsS3) Precision() time.Duration {
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}

@@ -500,13 +515,13 @@ func (f *FsS3) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsS3) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*FsObjectS3)
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debug(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcFs := srcObj.s3
	srcFs := srcObj.fs
	key := f.root + remote
	source := srcFs.bucket + "/" + srcFs.root + srcObj.remote
	req := s3.CopyObjectInput{
@@ -525,12 +540,12 @@ func (f *FsS3) Copy(src fs.Object, remote string) (fs.Object, error) {
// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *FsObjectS3) Fs() fs.Fs {
	return o.s3
func (o *Object) Fs() fs.Fs {
	return o.fs
}

// Return a string version
func (o *FsObjectS3) String() string {
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
@@ -538,14 +553,14 @@ func (o *FsObjectS3) String() string {
}

// Remote returns the remote path
func (o *FsObjectS3) Remote() string {
func (o *Object) Remote() string {
	return o.remote
}

var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectS3) Md5sum() (string, error) {
func (o *Object) Md5sum() (string, error) {
	etag := strings.Trim(strings.ToLower(o.etag), `"`)
	// Check the etag is a valid md5sum
	if !matchMd5.MatchString(etag) {
@@ -556,23 +571,23 @@ func (o *FsObjectS3) Md5sum() (string, error) {
}

// Size returns the size of an object in bytes
func (o *FsObjectS3) Size() int64 {
func (o *Object) Size() int64 {
	return o.bytes
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *FsObjectS3) readMetaData() (err error) {
func (o *Object) readMetaData() (err error) {
	if o.meta != nil {
		return nil
	}
	key := o.s3.root + o.remote
	key := o.fs.root + o.remote
	req := s3.HeadObjectInput{
		Bucket: &o.s3.bucket,
		Bucket: &o.fs.bucket,
		Key:    &key,
	}
	resp, err := o.s3.c.HeadObject(&req)
	resp, err := o.fs.c.HeadObject(&req)
	if err != nil {
		fs.Debug(o, "Failed to read info: %s", err)
		return err
@@ -599,7 +614,7 @@ func (o *FsObjectS3) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectS3) ModTime() time.Time {
func (o *Object) ModTime() time.Time {
	err := o.readMetaData()
	if err != nil {
		fs.Log(o, "Failed to read metadata: %s", err)
@@ -620,7 +635,7 @@ func (o *FsObjectS3) ModTime() time.Time {
}

// SetModTime sets the modification time of the local fs object
func (o *FsObjectS3) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) {
	err := o.readMetaData()
	if err != nil {
		fs.Stats.Error()
@@ -630,18 +645,18 @@ func (o *FsObjectS3) SetModTime(modTime time.Time) {
	o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))

	// Copy the object to itself to update the metadata
	key := o.s3.root + o.remote
	sourceKey := o.s3.bucket + "/" + key
	key := o.fs.root + o.remote
	sourceKey := o.fs.bucket + "/" + key
	directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
	req := s3.CopyObjectInput{
		Bucket: &o.s3.bucket,
		ACL:    &o.s3.perm,
		Bucket: &o.fs.bucket,
		ACL:    &o.fs.perm,
		Key:               &key,
		CopySource:        &sourceKey,
		Metadata:          o.meta,
		MetadataDirective: &directive,
	}
	_, err = o.s3.c.CopyObject(&req)
	_, err = o.fs.c.CopyObject(&req)
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
@@ -649,18 +664,18 @@ func (o *FsObjectS3) SetModTime(modTime time.Time) {
}

// Storable returns a boolean indicating if this object is storable
func (o *FsObjectS3) Storable() bool {
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *FsObjectS3) Open() (in io.ReadCloser, err error) {
	key := o.s3.root + o.remote
func (o *Object) Open() (in io.ReadCloser, err error) {
	key := o.fs.root + o.remote
	req := s3.GetObjectInput{
		Bucket: &o.s3.bucket,
		Bucket: &o.fs.bucket,
		Key:    &key,
	}
	resp, err := o.s3.c.GetObject(&req)
	resp, err := o.fs.c.GetObject(&req)
	if err != nil {
		return nil, err
	}
@@ -668,14 +683,12 @@ func (o *FsObjectS3) Open() (in io.ReadCloser, err error) {
}

// Update the Object from in with modTime and size
func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
	opts := s3manager.UploadOptions{
		// PartSize: 64 * 1024 * 1024, use default
		Concurrency:       2, // limit concurrency
		LeavePartsOnError: false,
		S3:                o.s3.c,
	}
	uploader := s3manager.NewUploader(&opts)
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
	uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
		u.Concurrency = 2
		u.LeavePartsOnError = false
		u.S3 = o.fs.c
	})

	// Set the mtime in the meta data
	metadata := map[string]*string{
@@ -685,10 +698,10 @@ func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
	// Guess the content type
	contentType := fs.MimeType(o)

	key := o.s3.root + o.remote
	key := o.fs.root + o.remote
	req := s3manager.UploadInput{
		Bucket: &o.s3.bucket,
		ACL:    &o.s3.perm,
		Bucket: &o.fs.bucket,
		ACL:    &o.fs.perm,
		Key:         &key,
		Body:        in,
		ContentType: &contentType,
@@ -707,17 +720,19 @@ func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
}

// Remove an object
func (o *FsObjectS3) Remove() error {
	key := o.s3.root + o.remote
func (o *Object) Remove() error {
	key := o.fs.root + o.remote
	req := s3.DeleteObjectInput{
		Bucket: &o.s3.bucket,
		Bucket: &o.fs.bucket,
		Key:    &key,
	}
	_, err := o.s3.c.DeleteObject(&req)
	_, err := o.fs.c.DeleteObject(&req)
	return err
}

// Check the interfaces are satisfied
var _ fs.Fs = &FsS3{}
var _ fs.Copier = &FsS3{}
var _ fs.Object = &FsObjectS3{}
var (
	_ fs.Fs     = &Fs{}
	_ fs.Copier = &Fs{}
	_ fs.Object = &Object{}
)
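
The rewritten Update uses the functional-options style this version of aws-sdk-go's s3manager adopted: instead of an UploadOptions struct, callers pass closures that mutate the Uploader after its defaults are applied. A dependency-free sketch of the pattern (the uploader type below is a stand-in, not the real s3manager.Uploader):

package main

import "fmt"

// uploader stands in for s3manager.Uploader to show the shape of the
// functional options the new NewUploader call takes.
type uploader struct {
	Concurrency       int
	LeavePartsOnError bool
}

// newUploader applies defaults first, then each caller-supplied option.
func newUploader(opts ...func(*uploader)) *uploader {
	u := &uploader{Concurrency: 5} // library default
	for _, opt := range opts {
		opt(u)
	}
	return u
}

func main() {
	u := newUploader(func(u *uploader) {
		u.Concurrency = 2 // limit concurrency, as Update does
		u.LeavePartsOnError = false
	})
	fmt.Printf("%+v\n", *u)
}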

@@ -13,7 +13,7 @@ import (
)

func init() {
	fstests.NilObject = fs.Object((*s3.FsObjectS3)(nil))
	fstests.NilObject = fs.Object((*s3.Object)(nil))
	fstests.RemoteName = "TestS3:"
}
396
swift/swift.go
@@ -2,16 +2,30 @@
package swift

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/swift"
	"github.com/spf13/pflag"
)

// Constants
const (
	directoryMarkerContentType = "application/directory" // content type of directory marker objects
	directoryMarkerMaxSize     = 1                       // max size that directory marker objects can be
)
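
These two constants define what this backend treats as a directory marker: a tiny object whose content type is application/directory, which is why NewFs below skips such objects when probing a path and why Purge has to delete them explicitly. The test they imply, as a minimal sketch:

package main

import "fmt"

const (
	directoryMarkerContentType = "application/directory"
	directoryMarkerMaxSize     = 1
)

// isDirectoryMarker applies the two constants above: objects at or
// under the size limit with the directory content type are markers,
// not real files.
func isDirectoryMarker(contentType string, size int64) bool {
	return contentType == directoryMarkerContentType && size <= directoryMarkerMaxSize
}

func main() {
	fmt.Println(isDirectoryMarker("application/directory", 0)) // true
	fmt.Println(isDirectoryMarker("text/plain", 0))            // false
}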

// Globals
var (
	chunkSize = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)

// Register with Fs
@@ -51,46 +65,48 @@ func init() {
		Name: "region",
		Help: "Region name - optional",
	},
	// snet = flag.Bool("swift-snet", false, "Use internal service network") // FIXME not implemented
	},
	})
	// snet = flag.Bool("swift-snet", false, "Use internal service network") // FIXME not implemented
	pflag.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
}

// FsSwift represents a remote swift server
type FsSwift struct {
	name      string           // name of this remote
	c         swift.Connection // the connection to the swift server
	container string           // the container we are working on
	root      string           // the path we are working on if any
// Fs represents a remote swift server
type Fs struct {
	name              string           // name of this remote
	c                 swift.Connection // the connection to the swift server
	container         string           // the container we are working on
	segmentsContainer string           // container to store the segments (if any) in
	root              string           // the path we are working on if any
}

// FsObjectSwift describes a swift object
// Object describes a swift object
//
// Will definitely have info but maybe not meta
type FsObjectSwift struct {
	swift  *FsSwift        // what this object is part of
	remote string          // The remote path
	info   swift.Object    // Info from the swift object if known
	meta   *swift.Metadata // The object metadata if known
type Object struct {
	fs      *Fs            // what this object is part of
	remote  string         // The remote path
	info    swift.Object   // Info from the swift object if known
	headers *swift.Headers // The object headers if known
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *FsSwift) Name() string {
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *FsSwift) Root() string {
func (f *Fs) Root() string {
	if f.root == "" {
		return f.container
	}
	return f.container + "/" + f.root
}

// String converts this FsSwift to a string
func (f *FsSwift) String() string {
// String converts this Fs to a string
func (f *Fs) String() string {
	if f.root == "" {
		return fmt.Sprintf("Swift container %s", f.container)
	}
@@ -144,7 +160,7 @@ func swiftConnection(name string) (*swift.Connection, error) {
	return c, nil
}

// NewFs constructs an FsSwift from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
	container, directory, err := parsePath(root)
	if err != nil {
@@ -154,17 +170,18 @@ func NewFs(name, root string) (fs.Fs, error) {
	if err != nil {
		return nil, err
	}
	f := &FsSwift{
		name:      name,
		c:         *c,
		container: container,
		root:      directory,
	f := &Fs{
		name:              name,
		c:                 *c,
		container:         container,
		segmentsContainer: container + "_segments",
		root:              directory,
	}
	if f.root != "" {
		f.root += "/"
		// Check to see if the object exists
		_, _, err = f.c.Object(container, directory)
		if err == nil {
		// Check to see if the object exists - ignore directory markers
		_, headers, err := f.c.Object(container, directory)
		if err == nil && headers["Content-Type"] != directoryMarkerContentType {
			remote := path.Base(directory)
			f.root = path.Dir(directory)
			if f.root == "." {
@@ -183,46 +200,50 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsSwift) newFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
	fs := &FsObjectSwift{
		swift:  f,
func (f *Fs) newFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info but not meta
		fs.info = *info
		// Set info but not headers
		o.info = *info
	} else {
		err := fs.readMetaData() // reads info and meta, returning an error
		err := o.readMetaData() // reads info and headers, returning an error
		if err != nil {
			// logged already FsDebug("Failed to read info: %s", err)
			fs.Debug(o, "Failed to read metadata: %s", err)
			return nil
		}
	}
	return fs
	return o
}

// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsSwift) NewFsObject(remote string) fs.Object {
func (f *Fs) NewFsObject(remote string) fs.Object {
	return f.newFsObjectWithInfo(remote, nil)
}

// list the objects into the function supplied
// listFn is called from list and listContainerRoot to handle an object
type listFn func(string, *swift.Object) error

// listContainerRoot lists the objects into the function supplied from
// the container and root supplied
//
// If directories is set it only sends directories
func (f *FsSwift) list(directories bool, fn func(string, *swift.Object)) {
func (f *Fs) listContainerRoot(container, root string, directories bool, fn listFn) error {
	// Options for ObjectsWalk
	opts := swift.ObjectsOpts{
		Prefix: f.root,
		Prefix: root,
		Limit:  256,
	}
	if directories {
		opts.Delimiter = '/'
	}
	rootLength := len(f.root)
	err := f.c.ObjectsWalk(f.container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
		objects, err := f.c.Objects(f.container, opts)
	rootLength := len(root)
	return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
		objects, err := f.c.Objects(container, opts)
		if err == nil {
			for i := range objects {
				object := &objects[i]
@@ -233,24 +254,36 @@ func (f *FsSwift) list(directories bool, fn func(string, *swift.Object)) {
				}
				object.Name = object.Name[:len(object.Name)-1]
				}
				if !strings.HasPrefix(object.Name, f.root) {
				if !strings.HasPrefix(object.Name, root) {
					fs.Log(f, "Odd name received %q", object.Name)
					continue
				}
				remote := object.Name[rootLength:]
				fn(remote, object)
				err = fn(remote, object)
				if err != nil {
					break
				}
			}
		}
		return objects, err
	})
}
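
Refactoring list into listContainerRoot also changes the callback contract: listFn now returns an error, and the walk stops at the first non-nil one, giving callers an early exit the old fire-and-forget func(string, *swift.Object) could not express. The shape of that contract in miniature:

package main

import (
	"errors"
	"fmt"
)

// walk feeds names to fn and stops at the first error, mirroring the
// listFn contract of listContainerRoot above.
func walk(names []string, fn func(string) error) error {
	for _, name := range names {
		if err := fn(name); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	errStop := errors.New("found it") // caller-defined sentinel to cut the walk short
	err := walk([]string{"a", "b", "c"}, func(name string) error {
		fmt.Println(name)
		if name == "b" {
			return errStop
		}
		return nil
	})
	fmt.Println(err == errStop) // true - "c" was never visited
}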
|

// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *Fs) list(directories bool, fn listFn) {
err := f.listContainerRoot(f.container, f.root, directories, fn)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't read container %q: %s", f.container, err)
}
}

// List walks the path returning a channel of FsObjects
func (f *FsSwift) List() fs.ObjectsChan {
// listFiles walks the path returning a channel of FsObjects
//
// if ignoreStorable is set then it outputs the file even if Storable() is false
func (f *Fs) listFiles(ignoreStorable bool) fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
if f.container == "" {
// Return no objects at top level list
@@ -261,18 +294,28 @@ func (f *FsSwift) List() fs.ObjectsChan {
// List the objects
go func() {
defer close(out)
f.list(false, func(remote string, object *swift.Object) {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
out <- fs
f.list(false, func(remote string, object *swift.Object) error {
if o := f.newFsObjectWithInfo(remote, object); o != nil {
// Storable does a full metadata read on 0 size objects which might be manifest files
storable := o.Storable()
if storable || ignoreStorable {
out <- o
}
}
return nil
})
}()
}
return out
}

// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
return f.listFiles(false)
}

// ListDir lists the containers
func (f *FsSwift) ListDir() fs.DirChan {
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
if f.container == "" {
// List the containers
@@ -296,12 +339,13 @@ func (f *FsSwift) ListDir() fs.DirChan {
// List the directories in the path in the container
go func() {
defer close(out)
f.list(true, func(remote string, object *swift.Object) {
f.list(true, func(remote string, object *swift.Object) error {
out <- &fs.Dir{
Name: remote,
Bytes: object.Bytes,
Count: 0,
}
return nil
})
}()
}
@@ -313,29 +357,43 @@ func (f *FsSwift) ListDir() fs.DirChan {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsSwift) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
fs := &FsObjectSwift{swift: f, remote: remote}
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: remote,
}
return fs, fs.Update(in, modTime, size)
}

// Mkdir creates the container if it doesn't exist
func (f *FsSwift) Mkdir() error {
func (f *Fs) Mkdir() error {
return f.c.ContainerCreate(f.container, nil)
}

// Rmdir deletes the container
// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *FsSwift) Rmdir() error {
func (f *Fs) Rmdir() error {
if f.root != "" {
return nil
}
return f.c.ContainerDelete(f.container)
}

// Precision of the remote
func (f *FsSwift) Precision() time.Duration {
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}

// Purge deletes all the files and directories
//
// Implemented here so we can make sure we delete directory markers
func (f *Fs) Purge() error {
fs.DeleteFiles(f.listFiles(true))
return f.Rmdir()
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
@@ -345,13 +403,13 @@ func (f *FsSwift) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsSwift) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*FsObjectSwift)
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.swift
srcFs := srcObj.fs
_, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
if err != nil {
return nil, err
@@ -362,12 +420,12 @@ func (f *FsSwift) Copy(src fs.Object, remote string) (fs.Object, error) {
// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *FsObjectSwift) Fs() fs.Fs {
return o.swift
func (o *Object) Fs() fs.Fs {
return o.fs
}

// Return a string version
func (o *FsObjectSwift) String() string {
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
@@ -375,35 +433,54 @@ func (o *FsObjectSwift) String() string {
}

// Remote returns the remote path
func (o *FsObjectSwift) Remote() string {
func (o *Object) Remote() string {
return o.remote
}

// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectSwift) Md5sum() (string, error) {
func (o *Object) Md5sum() (string, error) {
isManifest, err := o.isManifestFile()
if err != nil {
return "", err
}
if isManifest {
fs.Debug(o, "Returning empty Md5sum for swift manifest file")
return "", nil
}
return strings.ToLower(o.info.Hash), nil
}

// isManifestFile checks for manifest header
func (o *Object) isManifestFile() (bool, error) {
err := o.readMetaData()
if err != nil {
if err == swift.ObjectNotFound {
return false, nil
}
return false, err
}
_, isManifestFile := (*o.headers)["X-Object-Manifest"]
return isManifestFile, nil
}
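
// ------------------------------------------------------------
// Illustration: the X-Object-Manifest check in isManifestFile can be
// exercised directly, as below; a sketch assuming a configured
// swift.Connection c (imports: github.com/ncw/swift), with placeholder
// container and object names.
func isDLOManifest(c *swift.Connection, container, name string) (bool, error) {
	_, headers, err := c.Object(container, name)
	if err == swift.ObjectNotFound {
		return false, nil // a missing object is simply not a manifest
	}
	if err != nil {
		return false, err
	}
	// only dynamic large object manifests carry this header
	_, ok := headers["X-Object-Manifest"]
	return ok, nil
}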

// Size returns the size of an object in bytes
func (o *FsObjectSwift) Size() int64 {
func (o *Object) Size() int64 {
return o.info.Bytes
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *FsObjectSwift) readMetaData() (err error) {
if o.meta != nil {
func (o *Object) readMetaData() (err error) {
if o.headers != nil {
return nil
}
info, h, err := o.swift.c.Object(o.swift.container, o.swift.root+o.remote)
info, h, err := o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
}
meta := h.ObjectMetadata()
o.info = info
o.meta = &meta
o.headers = &h
return nil
}

@@ -412,13 +489,13 @@ func (o *FsObjectSwift) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectSwift) ModTime() time.Time {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
// fs.Log(o, "Failed to read metadata: %s", err)
fs.Debug(o, "Failed to read metadata: %s", err)
return o.info.LastModified
}
modTime, err := o.meta.GetModTime()
modTime, err := o.headers.ObjectMetadata().GetModTime()
if err != nil {
// fs.Log(o, "Failed to read mtime from object: %s", err)
return o.info.LastModified
@@ -427,15 +504,20 @@ func (o *FsObjectSwift) ModTime() time.Time {
}

// SetModTime sets the modification time of the local fs object
func (o *FsObjectSwift) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to read metadata: %s", err)
return
}
o.meta.SetModTime(modTime)
err = o.swift.c.ObjectUpdate(o.swift.container, o.swift.root+o.remote, o.meta.ObjectHeaders())
meta := o.headers.ObjectMetadata()
meta.SetModTime(modTime)
newHeaders := meta.ObjectHeaders()
for k, v := range newHeaders {
(*o.headers)[k] = v
}
err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
@@ -443,39 +525,163 @@ func (o *FsObjectSwift) SetModTime(modTime time.Time) {
}
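
// ------------------------------------------------------------
// Illustration: how the mtime round-trips, as a sketch assuming a
// configured swift.Connection c (imports: time, github.com/ncw/swift).
// swift.Metadata stores the time as a float string in an
// X-Object-Meta-* header; since a metadata update replaces the
// object's whole metadata set, SetModTime above merges the new headers
// into its cached copy rather than dropping the old ones.
func setThenGetModTime(c *swift.Connection, container, name string, t time.Time) (time.Time, error) {
	m := swift.Metadata{}
	m.SetModTime(t)
	if err := c.ObjectUpdate(container, name, m.ObjectHeaders()); err != nil {
		return time.Time{}, err
	}
	_, headers, err := c.Object(container, name) // re-read the stored headers
	if err != nil {
		return time.Time{}, err
	}
	return headers.ObjectMetadata().GetModTime()
}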

// Storable returns if this object is storable
func (o *FsObjectSwift) Storable() bool {
return true
//
// It reads the metadata for <= directoryMarkerMaxSize byte objects then compares the
// Content-Type to directoryMarkerContentType - that makes it a
// directory marker which is not storable.
func (o *Object) Storable() bool {
if o.info.Bytes > directoryMarkerMaxSize {
return true
}
err := o.readMetaData()
if err != nil {
fs.Debug(o, "Failed to read metadata: %s", err)
return true
}
contentType := (*o.headers)["Content-Type"]
return contentType != directoryMarkerContentType
}
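
// ------------------------------------------------------------
// Illustration: the directory-marker test above, restated as a sketch
// assuming a configured swift.Connection c. The two constants are
// defined elsewhere in this file and not shown in this diff, so the
// values below are assumptions: markers are conventionally tiny
// objects with Content-Type "application/directory".
const (
	assumedMarkerMaxSize     = 1                       // placeholder for directoryMarkerMaxSize
	assumedMarkerContentType = "application/directory" // placeholder for directoryMarkerContentType
)

func looksLikeDirectoryMarker(c *swift.Connection, container, name string) (bool, error) {
	info, headers, err := c.Object(container, name)
	if err != nil {
		return false, err
	}
	if info.Bytes > assumedMarkerMaxSize {
		return false, nil // large objects are never markers, so skip the header check
	}
	return headers["Content-Type"] == assumedMarkerContentType, nil
}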

// Open an object for read
func (o *FsObjectSwift) Open() (in io.ReadCloser, err error) {
in, _, err = o.swift.c.ObjectOpen(o.swift.container, o.swift.root+o.remote, true, nil)
func (o *Object) Open() (in io.ReadCloser, err error) {
in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, true, nil)
return
}

// min returns the smallest of x, y
func min(x, y int64) int64 {
if x < y {
return x
}
return y
}

// removeSegments removes any old segments from o
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
segmentsRoot := o.fs.root + o.remote + "/"
err := o.fs.listContainerRoot(o.fs.segmentsContainer, segmentsRoot, false, func(remote string, object *swift.Object) error {
if except != "" && strings.HasPrefix(remote, except) {
// fs.Debug(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.fs.segmentsContainer)
return nil
}
segmentPath := segmentsRoot + remote
fs.Debug(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
return o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
})
if err != nil {
return err
}
// remove the segments container if empty, ignore errors
err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
if err == nil {
fs.Debug(o, "Removed empty container %q", o.fs.segmentsContainer)
}
return nil
}

// updateChunks updates the existing object using chunks to a separate
// container. It returns a string which prefixes current segments.
func (o *Object) updateChunks(in io.Reader, headers swift.Headers, size int64) (string, error) {
// Create the segmentsContainer if it doesn't exist
err := o.fs.c.ContainerCreate(o.fs.segmentsContainer, nil)
if err != nil {
return "", err
}
// Upload the chunks
left := size
i := 0
uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
segmentsPath := fmt.Sprintf("%s%s/%s", o.fs.root, o.remote, uniquePrefix)
for left > 0 {
n := min(left, int64(chunkSize))
headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
segmentReader := io.LimitReader(in, n)
segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
fs.Debug(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
_, err := o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
if err != nil {
return "", err
}
left -= n
i++
}
// Upload the manifest
headers["X-Object-Manifest"] = fmt.Sprintf("%s/%s", o.fs.segmentsContainer, segmentsPath)
headers["Content-Length"] = "0" // set Content-Length as we know it
emptyReader := bytes.NewReader(nil)
manifestName := o.fs.root + o.remote
_, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", "", headers)
return uniquePrefix + "/", err
}
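
// ------------------------------------------------------------
// Illustration: the dynamic large object scheme used by updateChunks,
// as a standalone sketch assuming a configured swift.Connection c
// (imports: bytes, fmt, io, time, github.com/ncw/swift). The container
// names and the 4 MiB segment size are placeholders. Segments go to a
// separate container, and a zero-byte manifest pointing at their
// common prefix is written last, so readers never see a half-written
// object under the final name.
func putLargeObject(c *swift.Connection, container, segmentsContainer, name string, in io.Reader, size int64) error {
	if err := c.ContainerCreate(segmentsContainer, nil); err != nil {
		return err
	}
	const segmentSize = 4 * 1024 * 1024 // assumed; rclone's chunkSize is set elsewhere
	// time+size in the prefix keeps this upload's segments distinct
	// from any earlier upload of the same name
	prefix := fmt.Sprintf("%s/%s/%d", name, swift.TimeToFloatString(time.Now()), size)
	for i, left := 0, size; left > 0; i++ {
		n := left
		if n > segmentSize {
			n = segmentSize
		}
		segment := fmt.Sprintf("%s/%08d", prefix, i)
		_, err := c.ObjectPut(segmentsContainer, segment, io.LimitReader(in, n), true, "", "", nil)
		if err != nil {
			return err
		}
		left -= n
	}
	// GETs on the manifest concatenate every object under
	// <segmentsContainer>/<prefix> in name order
	headers := swift.Headers{"X-Object-Manifest": segmentsContainer + "/" + prefix}
	_, err := c.ObjectPut(container, name, bytes.NewReader(nil), true, "", "", headers)
	return err
}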

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectSwift) Update(in io.Reader, modTime time.Time, size int64) error {
// Set the mtime
m := swift.Metadata{}
m.SetModTime(modTime)
_, err := o.swift.c.ObjectPut(o.swift.container, o.swift.root+o.remote, in, true, "", "", m.ObjectHeaders())
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
// Note whether this has a manifest before starting
isManifest, err := o.isManifestFile()
if err != nil {
return err
}

// Set the mtime
m := swift.Metadata{}
m.SetModTime(modTime)
headers := m.ObjectHeaders()
uniquePrefix := ""
if size > int64(chunkSize) {
uniquePrefix, err = o.updateChunks(in, headers, size)
if err != nil {
return err
}
} else {
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
_, err := o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", "", headers)
if err != nil {
return err
}
}

// If file was a manifest then remove old/all segments
if isManifest {
err = o.removeSegments(uniquePrefix)
if err != nil {
fs.Log(o, "Failed to remove old segments - carrying on with upload: %v", err)
}
}

// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData()
return err
o.headers = nil // wipe old metadata
return o.readMetaData()
}

// Remove an object
func (o *FsObjectSwift) Remove() error {
return o.swift.c.ObjectDelete(o.swift.container, o.swift.root+o.remote)
func (o *Object) Remove() error {
isManifestFile, err := o.isManifestFile()
if err != nil {
return err
}
// Remove file/manifest first
err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
if err != nil {
return err
}
// ...then segments if required
if isManifestFile {
err = o.removeSegments("")
if err != nil {
return err
}
}
return nil
}

// Check the interfaces are satisfied
var _ fs.Fs = &FsSwift{}
var _ fs.Copier = &FsSwift{}
var _ fs.Object = &FsObjectSwift{}
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
)

@@ -13,7 +13,7 @@ import (
)

func init() {
fstests.NilObject = fs.Object((*swift.FsObjectSwift)(nil))
fstests.NilObject = fs.Object((*swift.Object)(nil))
fstests.RemoteName = "TestSwift:"
}