Mirror of https://github.com/rclone/rclone.git, synced 2026-01-12 05:23:55 +00:00

Compare commits: v1.12 ... v1.14 (8 commits)

SHA1        Author           Date                        Message
d9fcc32f70  Nick Craig-Wood  2015-05-21 20:13:40 +01:00  Version v1.14
870a9fc3b2  Nick Craig-Wood  2015-05-21 18:40:16 +01:00  local: fix encoding of non utf-8 file names - fixes #66
8e3703abeb  Nick Craig-Wood  2015-05-21 18:39:46 +01:00  drive: docs about rate limiting
ba81277bbe  Nick Craig-Wood  2015-05-19 08:18:26 +01:00  google cloud storage: Fix compile after API change in "google.golang.org/api/storage/v1" (The breaking change is: google-api-go-generator: remove underscores from identifiers.)
88293a4b8a  Nick Craig-Wood  2015-05-10 12:39:06 +01:00  Version v1.13
981104519e  Nick Craig-Wood  2015-05-10 12:17:24 +01:00  Revise documentation (especially sync) - fixes #39
1d254a3674  Nick Craig-Wood  2015-05-10 11:29:55 +01:00  Implement --timeout and --conntimeout - fixes #54 (NB dropbox still to do)
f88d171afd  Nick Craig-Wood  2015-05-10 11:29:55 +01:00  s3: ignore etags from multipart uploads which aren't md5sums - fixes #56
14 changed files with 219 additions and 92 deletions

View File

@@ -75,16 +75,16 @@ Subcommands
rclone copy source:path dest:path
Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.
rclone sync source:path dest:path
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
size. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the `--dry-run` flag.
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the `--dry-run` flag.
rclone ls [remote:path]
@@ -96,7 +96,8 @@ List all directories/containers/buckets in the the path.
rclone lsl [remote:path]
List all the objects in the the path with modification time, size and path.
List all the objects in the the path with modification time,
size and path.
rclone md5sum [remote:path]
@@ -133,14 +134,16 @@ This help.
General options:
```
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix K|M|G
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
--checkers=8: Number of checkers to run in parallel.
--config="~/.rclone.conf": Config file.
--contimeout=1m0s: Connect timeout
-n, --dry-run=false: Do a trial run with no permanent changes
--log-file="": Log everything to this file
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats (0 to disable)
--timeout=5m0s: IO idle timeout
--transfers=4: Number of file transfers to run in parallel.
-v, --verbose=false: Print lots more stuff
-V, --version=false: Print the version number
@@ -172,12 +175,7 @@ So to copy a local directory to a swift container called backup:
rclone sync /home/source swift:backup
The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point since the epoch.
This is a defacto standard (used in the official python-swiftclient
amongst others) for storing the modification time (as read using
os.Stat) for an object.
For more help see the [online docs on Openstack Swift](http://rclone.org/swift).
Amazon S3
---------
@@ -189,8 +187,7 @@ So to copy a local directory to a s3 container called backup
rclone sync /home/source s3:backup
The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch.
For more help see the [online docs on Amazon S3](http://rclone.org/s3).
Google drive
------------
@@ -205,7 +202,7 @@ To copy a local directory to a drive directory called backup
rclone copy /home/source remote:backup
Google drive stores modification times accurate to 1 ms natively.
For more help see the [online docs on Google Drive](http://rclone.org/drive).
Dropbox
-------
@@ -220,10 +217,7 @@ To copy a local directory to a drive directory called backup
rclone copy /home/source dropbox:backup
Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone". Dropbox datastores are limited
to 100,000 rows so this is the maximum number of files rclone can
manage on Dropbox.
For more help see the [online docs on Dropbox](http://rclone.org/dropbox).
Google Cloud Storage
--------------------
@@ -239,9 +233,7 @@ To copy a local directory to a google cloud storage directory called backup
rclone copy /home/source remote:backup
Google google cloud storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in
RFC3339 format accurate to 1ns.
For more help see the [online docs on Google Cloud Storage](http://rclone.org/googlecloudstorage/).
Single file copies
------------------
@@ -270,6 +262,14 @@ Bugs
Changelog
---------
* v1.14 - 2015-05-21
* local: fix encoding of non utf-8 file names - fixes a duplicate file problem
* drive: docs about rate limiting
* google cloud storage: Fix compile after API change in "google.golang.org/api/storage/v1"
* v1.13 - 2015-05-10
* Revise documentation (especially sync)
* Implement --timeout and --conntimeout
* s3: ignore etags from multipart uploads which aren't md5sums
* v1.12 - 2015-03-15
* drive: Use chunked upload for files above a certain size
* drive: add --drive-chunk-size and --drive-upload-cutoff parameters

View File

@@ -58,16 +58,16 @@ Subcommands
rclone copy source:path dest:path
Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.
rclone sync source:path dest:path
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
size. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the `--dry-run` flag.
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the `--dry-run` flag.
rclone ls [remote:path]
@@ -79,7 +79,8 @@ List all directories/containers/buckets in the the path.
rclone lsl [remote:path]
List all the objects in the the path with modification time, size and path.
List all the objects in the the path with modification time,
size and path.
rclone md5sum [remote:path]
@@ -114,14 +115,16 @@ Enter an interactive configuration session.
This help.
```
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix K|M|G
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
--checkers=8: Number of checkers to run in parallel.
--config="~/.rclone.conf": Config file.
--contimeout=1m0s: Connect timeout
-n, --dry-run=false: Do a trial run with no permanent changes
--log-file="": Log everything to this file
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats (0 to disable)
--timeout=5m0s: IO idle timeout
--transfers=4: Number of file transfers to run in parallel.
-v, --verbose=false: Print lots more stuff
-V, --version=false: Print the version number

View File

@@ -2,34 +2,34 @@
title: "Rclone downloads"
description: "Download rclone binaries for your OS."
type: page
date: "2015-03-15"
date: "2015-05-21"
---
Rclone Download v1.12
Rclone Download v1.14
=====================
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.12-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.12-windows-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.14-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.14-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.12-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.12-osx-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.14-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.14-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.12-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.12-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.12-linux-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.14-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.14-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.14-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.12-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.12-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.12-freebsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.14-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.14-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.14-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.12-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.12-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.12-netbsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.14-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.14-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.14-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.12-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.12-openbsd-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.14-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.14-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.12-plan9-386.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.14-plan9-386.zip)
Older downloads can be found [here](http://downloads.rclone.org/)

View File

@@ -1,7 +1,7 @@
---
title: "Google drive"
description: "Rclone docs for Google drive"
date: "2014-04-26"
date: "2015-05-10"
---
<i class="fa fa-google"></i> Google Drive
@@ -73,3 +73,24 @@ Modified time
-------------
Google drive stores modification times accurate to 1 ms.
Revisions
---------
Google drive stores revisions of files. When you upload a change to
an existing file to google drive using rclone it will create a new
revision of that file.
Revisions follow the standard google policy which at time of writing
was
* They are deleted after 30 days or 100 revisions (whatever comes first).
* They do not count towards a user storage quota.
Limitations
-----------
Drive has quite a lot of rate limiting. This causes rclone to be
limited to transferring about 2 files per second only. Individual
files may be transferred much faster at 100s of MBytes/s but lots of
small files can take a long time.
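As a rough illustration of the figure above (not a benchmark): at about 2 files per second, a tree of 10,000 small files needs roughly 10,000 / 2 = 5,000 seconds, i.e. around 83 minutes, however fast the link is.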

View File

@@ -23,3 +23,18 @@ Rclone reads and writes the modified time using an accuracy determined by
the OS. Typically this is 1ns on Linux, 10 ns on Windows and 1 Second
on OS X.
Filenames
---------
Filenames are expected to be encoded in UTF-8 on disk. This is the
normal case for Windows and OS X. There is a bit more uncertainty in
the Linux world, but new distributions will have UTF-8 encoded files
names.
If an invalid (non-UTF8) filename is read, the invalid characters will
be replaced with the unicode replacement character, '�'. `rclone`
will emit a warning in this case, eg
```
Local file system at .: Replacing invalid UTF-8 characters in "gro\xdf"
```
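A minimal standalone sketch of that replacement, relying on the same rune-conversion behaviour the new cleanUtf8 helper uses (the file name is just an example):
```
package main

import "fmt"

func main() {
	name := "gro\xdf"             // "groß" in Latin-1, which is not valid UTF-8
	fixed := string([]rune(name)) // each invalid byte becomes the replacement rune U+FFFD
	fmt.Printf("%q -> %q\n", name, fixed) // prints "gro\xdf" -> "gro�"
}
```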

View File

@@ -17,6 +17,20 @@ This is a JSON decode error - from Update / UploadByChunk
- Caused by 500 error from dropbox
- See https://github.com/stacktic/dropbox/issues/1
- Possibly confusing dropbox with excess concurrency?
FIXME implement timeouts - need to get "github.com/stacktic/dropbox"
and hence "golang.org/x/oauth2" which uses DefaultTransport unless it
is set in the context passed into .Client()
func (db *Dropbox) client() *http.Client {
return db.config.Client(oauth2.NoContext, db.token)
}
// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
var HTTPClient ContextKey
So pass in a context with HTTPClient set...
*/
import (
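For what the FIXME is pointing at, a sketch of the context-based approach (clientWith and base are illustrative names, and conf/token stand for the existing oauth2 config and token):
```
package example

import (
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

// clientWith makes oauth2 build its HTTP client on top of base instead
// of http.DefaultTransport, by passing base in via the context key that
// golang.org/x/oauth2 looks for.
func clientWith(conf *oauth2.Config, token *oauth2.Token, base *http.Client) *http.Client {
	ctx := context.WithValue(oauth2.NoContext, oauth2.HTTPClient, base)
	return conf.Client(ctx, token)
}
```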

View File

@@ -7,6 +7,7 @@ import (
"fmt"
"log"
"math"
"net/http"
"os"
"os/user"
"path"
@@ -16,6 +17,7 @@ import (
"time"
"github.com/Unknwon/goconfig"
"github.com/mreiferson/go-httpclient"
"github.com/ogier/pflag"
)
@@ -36,14 +38,16 @@ var (
// Global config
Config = &ConfigInfo{}
// Flags
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
bwLimit SizeSuffix
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
timeout = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
bwLimit SizeSuffix
)
func init() {
@@ -112,12 +116,48 @@ var _ pflag.Value = (*SizeSuffix)(nil)
// Filesystem config options
type ConfigInfo struct {
Verbose bool
Quiet bool
DryRun bool
ModifyWindow time.Duration
Checkers int
Transfers int
Verbose bool
Quiet bool
DryRun bool
ModifyWindow time.Duration
Checkers int
Transfers int
ConnectTimeout time.Duration // Connect timeout
Timeout time.Duration // Data channel timeout
}
// Transport returns an http.RoundTripper with the correct timeouts
func (ci *ConfigInfo) Transport() http.RoundTripper {
return &httpclient.Transport{
Proxy: http.ProxyFromEnvironment,
MaxIdleConnsPerHost: ci.Checkers + ci.Transfers + 1,
// ConnectTimeout, if non-zero, is the maximum amount of time a dial will wait for
// a connect to complete.
ConnectTimeout: ci.ConnectTimeout,
// ResponseHeaderTimeout, if non-zero, specifies the amount of
// time to wait for a server's response headers after fully
// writing the request (including its body, if any). This
// time does not include the time to read the response body.
ResponseHeaderTimeout: ci.Timeout,
// RequestTimeout, if non-zero, specifies the amount of time for the entire
// request to complete (including all of the above timeouts + entire response body).
// This should never be less than the sum total of the above two timeouts.
//RequestTimeout: NOT SET,
// ReadWriteTimeout, if non-zero, will set a deadline for every Read and
// Write operation on the request connection.
ReadWriteTimeout: ci.Timeout,
}
}
// Transport returns an http.Client with the correct timeouts
func (ci *ConfigInfo) Client() *http.Client {
return &http.Client{
Transport: ci.Transport(),
}
}
// Find the config directory
@@ -152,6 +192,8 @@ func LoadConfig() {
Config.Checkers = *checkers
Config.Transfers = *transfers
Config.DryRun = *dryRun
Config.Timeout = *timeout
Config.ConnectTimeout = *connectTimeout
ConfigPath = *configFile
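With Transport() and Client() in place, a remote only needs to ask the config for its client to pick up both timeouts; a minimal sketch (fetch and its error handling are illustrative, not rclone code):
```
package example

import (
	"io/ioutil"

	"github.com/ncw/rclone/fs"
)

// fetch does an HTTP GET that honours --contimeout and --timeout,
// because fs.Config.Client() wires both into its transport.
func fetch(url string) ([]byte, error) {
	resp, err := fs.Config.Client().Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}
```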

View File

@@ -1,3 +1,3 @@
package fs
const Version = "v1.12"
const Version = "v1.14"

View File

@@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"log"
"net/http"
"code.google.com/p/goauth2/oauth"
"github.com/ncw/rclone/fs"
@@ -82,7 +81,7 @@ func (auth *Auth) newTransport(name string) (*oauth.Transport, error) {
t := &oauth.Transport{
Config: config,
Transport: http.DefaultTransport,
Transport: fs.Config.Transport(),
}
return t, nil

View File

@@ -42,7 +42,7 @@ const (
var (
// Description of how to auth for this app
storageAuth = &googleauth.Auth{
Scope: storage.DevstorageFull_controlScope,
Scope: storage.DevstorageFullControlScope,
DefaultClientId: rcloneClientId,
DefaultClientSecret: rcloneClientSecret,
}

View File

@@ -18,6 +18,7 @@ import (
"path/filepath"
"sync"
"time"
"unicode/utf8"
"github.com/ncw/rclone/fs"
)
@@ -32,14 +33,15 @@ func init() {
// FsLocal represents a local filesystem rooted at root
type FsLocal struct {
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warned map[string]struct{} // whether we have warned about this string
}
// FsObjectLocal represents a local filesystem object
type FsObjectLocal struct {
local fs.Fs // The Fs this object is part of
local *FsLocal // The Fs this object is part of
remote string // The remote path
path string // The local path
info os.FileInfo // Interface for file info (always present)
@@ -51,7 +53,10 @@ type FsObjectLocal struct {
// NewFs contstructs an FsLocal from the path
func NewFs(name, root string) (fs.Fs, error) {
root = filepath.ToSlash(path.Clean(root))
f := &FsLocal{root: root}
f := &FsLocal{
root: root,
warned: make(map[string]struct{}),
}
// Check to see if this points to a file
fi, err := os.Lstat(f.root)
if err == nil && fi.Mode().IsRegular() {
@@ -134,6 +139,20 @@ func (f *FsLocal) List() fs.ObjectsChan {
return out
}
// CleanUtf8 makes string a valid UTF-8 string
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
func (f *FsLocal) cleanUtf8(name string) string {
if utf8.ValidString(name) {
return name
}
if _, ok := f.warned[name]; !ok {
fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
return string([]rune(name))
}
// Walk the path returning a channel of FsObjects
func (f *FsLocal) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
@@ -147,7 +166,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
for _, item := range items {
if item.IsDir() {
dir := &fs.Dir{
Name: item.Name(),
Name: f.cleanUtf8(item.Name()),
When: item.ModTime(),
Bytes: 0,
Count: 0,
@@ -294,7 +313,7 @@ func (o *FsObjectLocal) String() string {
// Return the remote path
func (o *FsObjectLocal) Remote() string {
return o.remote
return o.local.cleanUtf8(o.remote)
}
// Md5sum calculates the Md5sum of a file returning a lowercase hex string

View File

@@ -62,8 +62,8 @@ var Commands = []Command{
ArgsHelp: "source:path dest:path",
Help: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
size. Doesn't delete files from the destination.`,
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Sync(fdst, fsrc, false)
if err != nil {
@@ -77,11 +77,11 @@ var Commands = []Command{
Name: "sync",
ArgsHelp: "source:path dest:path",
Help: `
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
size. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the --dry-run flag.`,
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the --dry-run flag.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Sync(fdst, fsrc, true)
if err != nil {
@@ -123,7 +123,8 @@ var Commands = []Command{
Name: "lsl",
ArgsHelp: "[remote:path]",
Help: `
List all the objects in the the path with modification time, size and path.`,
List all the objects in the the path with modification time,
size and path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.ListLong(fdst, os.Stdout)
if err != nil {
@@ -242,7 +243,8 @@ Subcommands:
fmt.Fprintf(os.Stderr, "Options:\n")
pflag.PrintDefaults()
fmt.Fprintf(os.Stderr, `
It is only necessary to use a unique prefix of the subcommand, eg 'up' for 'upload'.
It is only necessary to use a unique prefix of the subcommand, eg 'up'
for 'upload'.
`)
}

View File

@@ -184,6 +184,7 @@ func s3Connection(name string) (*s3.S3, error) {
}
c := s3.New(auth, region)
c.Client = fs.Config.Client()
return c, nil
}
@@ -419,9 +420,17 @@ func (o *FsObjectS3) Remote() string {
return o.remote
}
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectS3) Md5sum() (string, error) {
return strings.Trim(strings.ToLower(o.etag), `"`), nil
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
return "", nil
}
return etag, nil
}
// Size returns the size of an object in bytes
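As a standalone illustration of why the check is needed: multipart uploads get an ETag with a part-count suffix, which fails the 32-hex-digit test (the ETag values below are made up):
```
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

func main() {
	for _, etag := range []string{
		`"9e107d9d372bb6826bd81d3542a419d6"`,    // single-part upload: a real md5sum
		`"9e107d9d372bb6826bd81d3542a419d6-38"`, // multipart upload: not an md5sum, so ignored
	} {
		cleaned := strings.Trim(strings.ToLower(etag), `"`)
		fmt.Println(cleaned, matchMd5.MatchString(cleaned))
	}
}
```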

View File

@@ -113,12 +113,15 @@ func swiftConnection(name string) (*swift.Connection, error) {
return nil, errors.New("auth not found")
}
c := &swift.Connection{
UserName: userName,
ApiKey: apiKey,
AuthUrl: authUrl,
UserAgent: fs.UserAgent,
Tenant: fs.ConfigFile.MustValue(name, "tenant"),
Region: fs.ConfigFile.MustValue(name, "region"),
UserName: userName,
ApiKey: apiKey,
AuthUrl: authUrl,
UserAgent: fs.UserAgent,
Tenant: fs.ConfigFile.MustValue(name, "tenant"),
Region: fs.ConfigFile.MustValue(name, "region"),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fs.Config.Transport(),
}
err := c.Authenticate()
if err != nil {