mirror of https://github.com/rclone/rclone.git synced 2025-12-29 14:43:24 +00:00

Compare commits


16 Commits
v0.95 ... v0.97

Author SHA1 Message Date
Nick Craig-Wood
c6dfd5f2d3 Version 0.97 2014-05-05 22:25:29 +01:00
Nick Craig-Wood
99695d57ab Implement single file operations for all file systems 2014-05-05 22:17:57 +01:00
Nick Craig-Wood
ca3752f824 s3: support sub-bucket paths 2014-05-05 18:26:37 +01:00
Nick Craig-Wood
d0ca58bbb1 swift: Support sub container paths 2014-05-05 18:26:37 +01:00
Nick Craig-Wood
580fa3a5a7 Documentation updates 2014-04-26 17:43:41 +01:00
Nick Craig-Wood
365dc2ff59 Version v0.96 - automate the release process 2014-04-25 18:18:58 +01:00
Nick Craig-Wood
a81ae3c3f9 Add version number, -V and --version 2014-04-24 17:59:05 +01:00
Nick Craig-Wood
8fd59f2e7d drive: Use o.Update and fs.Put to optimise transfers 2014-04-18 17:49:01 +01:00
Nick Craig-Wood
02afcb00e9 Factor Fs.Put into Object.Update and call Update rather than Put if possible 2014-04-18 17:49:01 +01:00
Nick Craig-Wood
d6a5bfe2d4 Get rid of fs.CopyFs and replace with fs.Sync in preparation for Object.Update 2014-04-18 17:48:46 +01:00
Nick Craig-Wood
bb0bf2fa8e How to build the docs 2014-04-17 22:39:23 +01:00
Nick Craig-Wood
2c1e6b54f9 Remove examples - leave them for the website 2014-04-17 22:39:07 +01:00
Nick Craig-Wood
40f755df20 Rename docs directory 2014-04-17 22:32:39 +01:00
Nick Craig-Wood
8d32651c53 Fix upload_website 2014-04-17 22:28:47 +01:00
Nick Craig-Wood
86b77f3ca8 drive: Fix multiple files of same name being created
ModifiedDate seems to be set on Insert if set, so do that
2014-04-17 22:27:33 +01:00
Nick Craig-Wood
bd62eb17e3 Add favicon 2014-04-17 20:49:12 +01:00
48 changed files with 841 additions and 469 deletions

6
.gitignore vendored

@@ -2,6 +2,8 @@
_junk/
rclone
rclonetest/rclonetest
upload
build
rclone.org/public
docs/public
README.html
README.txt
rclone.1

Makefile

@@ -1,22 +1,61 @@
rclone:
TAG := $(shell git describe --tags)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = "v" . $$_')
rclone: *.go */*.go
@go version
go build
doc: rclone.1 README.html README.txt
rclone.1: README.md
pandoc -s --from markdown --to man README.md -o rclone.1
README.html: README.md
pandoc -s --from markdown_github --to html README.md -o README.html
README.txt: README.md
pandoc -s --from markdown_github --to plain README.md -o README.txt
install: rclone
install -d ${DESTDIR}/usr/bin
install -t ${DESTDIR}/usr/bin rclone
clean:
go clean
go clean ./...
find . -name \*~ | xargs -r rm -f
rm -rf build rclone.org/public
rm -rf build docs/public
rm -f rclone rclonetest/rclonetest rclone.1 README.html README.txt
website:
cd rclone.org && hugo
cd docs && hugo
upload_website: website
./rclone sync rclone.org/public memstore://www-rclone-org
./rclone -v sync docs/public memstore:www-rclone-org
upload:
rsync -avz build/ www.craig-wood.com:public_html/pub/rclone/
./rclone -v copy build/ memstore:downloads-rclone-org
cross:
./cross-compile
cross: doc
./cross-compile $(TAG)
serve:
cd rclone.org && hugo server -v -w
cd docs && hugo server -v -w
tag:
@echo "Old tag is $(LAST_TAG)"
@echo "New tag is $(NEW_TAG)"
echo -e "package main\n const Version = \"$(NEW_TAG)\"\n" | gofmt > version.go
cp -av version.go rclonetest/version.go
perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
git tag $(NEW_TAG)
@echo "Add this to changelog in README.md"
@echo " * $(NEW_TAG) - " `date -I`
@git log $(LAST_TAG)..$(NEW_TAG) --oneline
@echo "Then commit the changes"
@echo git commit -m "Version $(NEW_TAG)" -a -v
@echo "And finally run make retag before make cross etc"
retag:
echo git tag -f $(LAST_TAG)

226
README.md

@@ -1,9 +1,13 @@
% rclone(1) User Manual
% Nick Craig-Wood
% Apr 24, 2014
Rclone
======
[![Logo](http://rclone.org/rclone-120x120.png)](http://rclone.org/)
[![Logo](http://rclone.org/img/rclone-120x120.png)](http://rclone.org/)
Sync files and directories to and from
Rclone is a command line program to sync files and directories to and from
* Google Drive
* Amazon S3
@@ -20,7 +24,7 @@ Features
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts
Home page
See the Home page for more documentation and configuration walkthroughs.
* http://rclone.org/
@@ -29,115 +33,35 @@ Install
Rclone is a Go program and comes as a single binary file.
Download the relevant binary from
Download the binary for your OS from
* http://www.craig-wood.com/nick/pub/rclone/
* http://rclone.org/downloads/
Or alternatively if you have Go installed use
go get github.com/ncw/rclone
go install github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`.
You can then modify the source and submit patches.
Configure
---------
First you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file
`.rclone.conf` in your home directory by default. (You can use the
-config option to choose a different config file.)
`--config` option to choose a different config file.)
The easiest way to make the config is to run rclone with the config
option, eg
rclone config
Here is an example of making an s3 configuration
```
$ rclone config
No remotes found - make a new one
n) New remote
q) Quit config
n/q> n
name> remote
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) drive
type> 2
AWS Access Key ID.
access_key_id> accesskey
AWS Secret Access Key (password).
secret_access_key> secretaccesskey
Endpoint for S3 API.
Choose a number from below, or type in your own value
* The default endpoint - a good choice if you are unsure.
* US Region, Northern Virginia or Pacific Northwest.
* Leave location constraint empty.
1) https://s3.amazonaws.com/
* US Region, Northern Virginia only.
* Leave location constraint empty.
2) https://s3-external-1.amazonaws.com
[snip]
* South America (Sao Paulo) Region
* Needs location constraint sa-east-1.
9) https://s3-sa-east-1.amazonaws.com
endpoint> 1
Location constraint - must be set to match the Endpoint.
Choose a number from below, or type in your own value
* Empty for US Region, Northern Virginia or Pacific Northwest.
1)
* US West (Oregon) Region.
2) us-west-2
[snip]
* South America (Sao Paulo) Region.
9) sa-east-1
location_constraint> 1
--------------------
[remote]
access_key_id = accesskey
secret_access_key = secretaccesskey
endpoint = https://s3.amazonaws.com/
location_constraint =
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
Current remotes:
Name Type
==== ====
remote s3
e) Edit existing remote
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> q
```
This can now be used like this
```
rclone lsd remote: - see all buckets/containers
rclone ls remote: - list a bucket
rclone sync /home/local/directory remote:bucket
```
See the next section for more details.
Usage
-----
Rclone syncs a directory tree from local to remote.
Its basic syntax is like this
Its basic syntax is
Syntax: [options] subcommand <parameters> <parameters...>
@@ -158,7 +82,7 @@ Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the -dry-run flag.
first with the `--dry-run` flag.
rclone ls [remote:path]
@@ -166,7 +90,7 @@ List all the objects in the the path.
rclone lsd [remote:path]
List all directoryes/objects/buckets in the the path.
List all directories/objects/buckets in the path.
rclone mkdir remote:path
@@ -188,17 +112,23 @@ compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
General options:
* `-config` Location of the config file
* `-transfers=4`: Number of file transfers to run in parallel.
* `-checkers=8`: Number of MD5SUM checkers to run in parallel.
* `-dry-run=false`: Do a trial run with no permanent changes
* `-modify-window=1ns`: Max time difference to be considered the same - this is automatically set usually
* `-quiet=false`: Print as little stuff as possible
* `-stats=1m0s`: Interval to print stats
* `-verbose=false`: Print lots more stuff
```
--checkers=8: Number of checkers to run in parallel.
--config="~/.rclone.conf": Config file.
-n, --dry-run=false: Do a trial run with no permanent changes
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats
--transfers=4: Number of file transfers to run in parallel.
-v, --verbose=false: Print lots more stuff
```
Developer options:
* `-cpuprofile=""`: Write cpu profile to file
```
--cpuprofile="": Write cpu profile to file
```
Local Filesystem
----------------
@@ -207,13 +137,14 @@ Paths are specified as normal filesystem paths, so
rclone sync /home/source /tmp/destination
Will sync source to destination
Will sync `/home/source` to `/tmp/destination`
Swift / Rackspace cloudfiles / Memset Memstore
----------------------------------------------
Paths are specified as remote:container (or remote: for the `lsd`
command.)
command.) You may put subdirectories in too, eg
`remote:container/path/to/dir`.
So to copy a local directory to a swift container called backup:
@@ -229,7 +160,8 @@ os.Stat) for an object.
Amazon S3
---------
Paths are specified as remote:bucket
Paths are specified as remote:bucket. You may put subdirectories in
too, eg `remote:bucket/path/to/dir`.
So to copy a local directory to an s3 bucket called backup
@@ -244,57 +176,28 @@ Google drive
Paths are specified as drive:path. Drive paths may be as deep as required.
The initial setup for drive involves getting a token from Google drive
which you need to do in your browser. The `rclone config` walks you
which you need to do in your browser. `rclone config` walks you
through it.
Here is an example of how to make a remote called `drv`
```
$ ./rclone config
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> drv
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) drive
type> 4
Google Application Client Id - leave blank to use rclone's.
client_id>
Google Application Client Secret - leave blank to use rclone's.
client_secret>
Remote config
Go to the following link in your browser
https://accounts.google.com/o/oauth2/auth?access_type=&approval_prompt=&client_id=XXXXXXXXXXXX.apps.googleusercontent.com&redirect_uri=urn%3XXXXX%3Awg%3Aoauth%3XX.0%3Aoob&response_type=code&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&state=state
Log in, then paste the token that is returned in the browser here
Enter verification code> X/XXXXXXXXXXXXXXXXXX-XXXXXXXXX.XXXXXXXXX-XXXXX_XXXXXXX_XXXXXXX
--------------------
[drv]
client_id =
client_secret =
token = {"AccessToken":"xxxx.xxxxxxx_xxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"1/xxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxx","Expiry":"2014-03-16T13:57:58.955387075Z","Extra":null}
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
You can then use it like this
rclone lsd drv:
rclone ls drv:
To copy a local directory to a drive directory called backup
rclone copy /home/source drv:backup
Google drive stores modification times accurate to 1 ms.
Single file copies
------------------
Rclone can copy single files
rclone src:path/to/file dst:path/dir
Or
rclone src:path/to/file dst:path/to/file
Note that you can't rename the file if you are copying from one file to another.
License
-------
@@ -304,12 +207,41 @@ COPYING file included in this package).
Bugs
----
* Doesn't sync individual files yet, only directories.
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work
Changelog
---------
* v0.97 - 2014-05-05
* Implement copying of single files
* s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
* drive: Fix multiple files of same name being created
* drive: Use o.Update and fs.Put to optimise transfers
* Add version number, -V and --version
* v0.95 - 2014-03-28
* rclone.org: website, docs and graphics
* drive: fix path parsing
* v0.94 - 2014-03-27
* Change remote format one last time
* GNU style flags
* v0.93 - 2014-03-16
* drive: store token in config file
* cross compile other versions
* set strict permissions on config file
* v0.92 - 2014-03-15
* Config fixes and --config option
* v0.91 - 2014-03-15
* Make config file
* v0.90 - 2013-06-27
* Project named rclone
* v0.00 - 2012-11-18
* Project started
Contact and support
-------------------
@@ -317,7 +249,7 @@ The project website is at:
* https://github.com/ncw/rclone
There you can file bug reports, ask for help or contribute patches.
There you can file bug reports, ask for help or send pull requests.
Authors
-------

cross-compile

@@ -1,26 +1,31 @@
#!/bin/sh
set -e
# This uses gox from https://github.com/mitchellh/gox
# Make sure you've run gox -build-toolchain
if [ "$1" == "" ]; then
echo "Syntax: $0 Version"
exit 1
fi
VERSION="$1"
rm -rf build
gox -output "build/{{.OS}}/{{.Arch}}/{{.Dir}}"
gox -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}"
cat <<'#EOF' > build/README.txt
This directory contains builds of the rclone program.
mv build/rclone-${VERSION}-darwin-amd64 build/rclone-${VERSION}-osx-amd64
mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
Rclone is a program to transfer files to and from cloud storage
systems such as Google Drive, Amazon S3 and Swift (Rackspace
Cloudfiles).
cd build
See the project website here: https://github.com/ncw/rclone for more
details.
for d in `ls`; do
cp -a ../README.txt $d/
cp -a ../README.html $d/
cp -a ../rclone.1 $d/
zip -r9 $d.zip $d
rm -rf $d
done
The files in this directory are organised by OS and processor type
#EOF
mv build/darwin build/osx
( cd build ; tree . >> README.txt )
cd ..

6
docs/README.md Normal file

@@ -0,0 +1,6 @@
Docs
====
See the content directory for the docs in markdown format.
Use [hugo](https://github.com/spf13/hugo) to build the website.

docs/content/about.md

@@ -2,7 +2,7 @@
title: "Rclone"
description: "rclone syncs files to and from Google Drive, S3, Swift and Cloudfiles."
type: page
date: "2014-03-19"
date: "2014-04-26"
groups: ["about"]
---
@@ -11,7 +11,7 @@ Rclone
[![Logo](/img/rclone-120x120.png)](http://rclone.org/)
Sync files and directories to and from
Rclone is a command line program to sync files and directories to and from
* Google Drive
* Amazon S3
@@ -33,6 +33,6 @@ Links
* [Home page](http://rclone.org/)
* [Github project page for source and more instructions](http://github.com/ncw/rclone)
* <a href="https://plus.google.com/110609214444437761115" rel="publisher">Google+ page</a></li>
* [Downloads](http://www.craig-wood.com/nick/pub/rclone/)
* [Downloads](/downloads/)
rclone is brought to you by <a href="http://www.craig-wood.com/nick/">Nick Craig-Wood</a> <img src="http://www.craig-wood.com/nick/small/njcw.jpg" />

docs/content/contact.md

@@ -1,12 +1,12 @@
---
title: "Contact"
description: "Contact the rclone project"
date: "2014-03-19"
date: "2014-04-26"
---
Contact the rclone project
* [Github project page for source and reporting bugs](http://github.com/ncw/rclone)
* [Github project page for source, reporting bugs and pull requests](http://github.com/ncw/rclone)
* <a href="https://plus.google.com/110609214444437761115" rel="publisher">Google+ page for general comments</a></li>
Or email [Nick Craig-Wood](mailto:nick@craig-wood.com)

docs/content/docs.md

@@ -1,7 +1,7 @@
---
title: "Documentation"
description: "Rclone Documentation"
date: "2014-03-19"
date: "2014-04-26"
---
Install
@@ -9,7 +9,7 @@ Install
Rclone is a Go program and comes as a single binary file.
[Download the relevant binary.](http://www.craig-wood.com/nick/pub/rclone/)
[Download](/downloads/) the relevant binary.
Or alternatively if you have Go installed use
@@ -17,15 +17,13 @@ Or alternatively if you have Go installed use
and this will build the binary in `$GOPATH/bin`.
You can then modify the source and submit patches.
Configure
---------
First you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file
`.rclone.conf` in your home directory by default. (You can use the
`-config` option to choose a different config file.)
`--config` option to choose a different config file.)
The easiest way to make the config is to run rclone with the config
option:

35
docs/content/downloads.md Normal file

@@ -0,0 +1,35 @@
---
title: "Rclone downloads"
description: "Download rclone binaries for your OS."
type: page
date: "2014-05-05"
---
v0.97
=====
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.97-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.97-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.97-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.97-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.97-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.97-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v0.97-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.97-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.97-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v0.97-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.97-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.97-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v0.97-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.97-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.97-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.97-plan9-386.zip)
Older downloads can be found [here](http://downloads.rclone.org/)

docs/content/downloads.md.in Normal file

@@ -0,0 +1,35 @@
---
title: "Rclone downloads"
description: "Download rclone binaries for your OS."
type: page
date: "DATE"
---
VERSION
=====
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-VERSION-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-VERSION-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-VERSION-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-plan9-386.zip)
Older downloads can be found [here](http://downloads.rclone.org/)

docs/content/drive.md

@@ -1,7 +1,7 @@
---
title: "Google drive"
description: "Rclone docs for Google drive"
date: "2014-03-19"
date: "2014-04-26"
---
Paths are specified as `drive:path`

docs/content/local.md

@@ -1,3 +1,9 @@
---
title: "Local Filesystem"
description: "Rclone docs for the local filesystem"
date: "2014-04-26"
---
Local Filesystem
----------------
@@ -13,7 +19,7 @@ but it is probably easier not to.
Modified time
-------------
We read and write the modified time using an accuracy determined by
Rclone reads and writes the modified time using an accuracy determined by
the OS. Typically this is 1ns on Linux, 10 ns on Windows and 1 Second
on OS X.

docs/content/s3.md

@@ -1,7 +1,7 @@
---
title: "Amazon S3"
description: "Rclone docs for Amazon S3"
date: "2014-03-19"
date: "2014-04-26"
---
Paths are specified as `remote:bucket` or `remote:`

docs/content/swift.md

@@ -1,7 +1,7 @@
---
title: "Swift"
description: "Swift"
date: "2014-03-19"
date: "2014-04-26"
---
Swift refers to [Openstack Object Storage](http://www.openstack.org/software/openstack-storage/).

docs/layouts/chrome/head.html

@@ -5,6 +5,7 @@
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>{{ .Title }}</title>
<link rel="icon" type="image/png" href="/img/rclone-32x32.png">
<link href="/css/bootstrap.min.css" rel="stylesheet">
<link href="/css/base.css" rel="stylesheet">
<!--[if lt IE 9]>

docs/layouts/chrome/navbar.html

@@ -12,12 +12,14 @@
<div class="navbar-collapse collapse">
<ul class="nav navbar-nav">
<li class="active"><a href="/">Home</a></li>
<li><a href="/downloads/">Downloads</a></li>
<li><a href="/docs/">Docs</a></li>
<li><a href="/contact/">Contact</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">Contents <b class="caret"></b></a>
<ul class="dropdown-menu">
<li><a href="/">About</a></li>
<li><a href="/downloads/">Downloads</a></li>
<li><a href="/docs/">Docs</a></li>
<li class="divider"></li>
<li class="dropdown-header">Storage systems</li>

Binary image file changed (62 KiB before and after; name not shown in this view)

Binary image file changed (19 KiB before and after; name not shown in this view)

BIN
docs/static/img/rclone-32x32.png vendored Normal file (new binary file, 2.9 KiB)

drive/drive.go

@@ -135,14 +135,15 @@ func (name tokenCache) PutToken(token *oauth.Token) error {
// FsDrive represents a remote drive server
type FsDrive struct {
svc *drive.Service // the connection to the drive server
root string // the path we are working on
client *http.Client // authorized client
about *drive.About // information about the drive, including the root
rootId string // Id of the root directory
foundRoot sync.Once // Whether we need to find the root directory or not
dirCache dirCache // Map of directory path to directory id
findDirLock sync.Mutex // Protect findDir from concurrent use
svc *drive.Service // the connection to the drive server
root string // the path we are working on
client *http.Client // authorized client
about *drive.About // information about the drive, including the root
rootId string // Id of the root directory
foundRoot bool // Whether we have found the root or not
findRootLock sync.Mutex // Protect findRoot from concurrent use
dirCache dirCache // Map of directory path to directory id
findDirLock sync.Mutex // Protect findDir from concurrent use
}
// FsObjectDrive describes a drive object
@@ -305,7 +306,10 @@ func NewFs(name, path string) (fs.Fs, error) {
if err != nil {
return nil, err
}
f := &FsDrive{root: root, dirCache: newDirCache()}
f := &FsDrive{
root: root,
dirCache: newDirCache(),
}
// Try to pull the token from the cache; if this fails, we need to get one.
token, err := t.Config.TokenCache.Token()
@@ -331,14 +335,33 @@ func NewFs(name, path string) (fs.Fs, error) {
f.rootId = f.about.RootFolderId
// Put the root directory in
f.dirCache.Put("", f.rootId)
// Find the current root
err = f.findRoot(false)
if err != nil {
// Assume it is a file
newRoot, remote := splitPath(root)
newF := *f
newF.root = newRoot
// Make new Fs which is the parent
err = newF.findRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
obj, err := newF.newFsObjectWithInfo(remote, nil)
if err != nil {
// File doesn't exist so return old f
return f, nil
}
// return a Fs Limited to this object
return fs.NewLimited(&newF, obj), nil
}
// fmt.Printf("Root id %s", f.rootId)
return f, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDrive) NewFsObjectWithInfo(remote string, info *drive.File) fs.Object {
func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
fs := &FsObjectDrive{
drive: f,
remote: remote,
@@ -349,9 +372,18 @@ func (f *FsDrive) NewFsObjectWithInfo(remote string, info *drive.File) fs.Object
err := fs.readMetaData() // reads info and meta, returning an error
if err != nil {
// logged already fs.Debug("Failed to read info: %s", err)
return nil
return nil, err
}
}
return fs, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDrive) NewFsObjectWithInfo(remote string, info *drive.File) fs.Object {
fs, _ := f.newFsObjectWithInfo(remote, info)
// Errors have already been logged
return fs
}
@@ -585,14 +617,21 @@ func (f *FsDrive) _findDir(path string, create bool) (pathId string, err error)
//
// If create is set it will make the directory if not found
func (f *FsDrive) findRoot(create bool) error {
var err error
f.foundRoot.Do(func() {
f.rootId, err = f.findDir(f.root, create)
f.dirCache.Flush()
// Put the root directory in
f.dirCache.Put("", f.rootId)
})
return err
f.findRootLock.Lock()
defer f.findRootLock.Unlock()
if f.foundRoot {
return nil
}
rootId, err := f.findDir(f.root, create)
if err != nil {
return err
}
f.rootId = rootId
f.dirCache.Flush()
// Put the root directory in
f.dirCache.Put("", f.rootId)
f.foundRoot = true
return nil
}
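
A note on why `sync.Once` was likely dropped here (an inference; the commit message doesn't say): `Once` runs its function only on the first call, even if that call fails, so a failed root lookup could never be retried later with `create` set. A minimal sketch of the difference between the two patterns:

```
package example

import "sync"

// With sync.Once, a failed initialisation is remembered forever:
type onceInit struct {
	once sync.Once
	err  error
}

func (i *onceInit) init(do func() error) error {
	i.once.Do(func() { i.err = do() }) // only ever runs once
	return i.err                       // later calls see the stale error
}

// With a mutex and a bool, a failed attempt can be retried:
type retryInit struct {
	mu   sync.Mutex
	done bool
}

func (i *retryInit) init(do func() error) error {
	i.mu.Lock()
	defer i.mu.Unlock()
	if i.done {
		return nil
	}
	if err := do(); err != nil {
		return err // not marked done, so the next call retries
	}
	i.done = true
	return nil
}
```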
// Walk the path returning a channel of FsObjects
@@ -648,61 +687,48 @@ func (f *FsDrive) ListDir() fs.DirChan {
return out
}
// Put the FsObject into the container
// Put the object
//
// This assumes that the object doesn't already exist - if you
// call it when it does exist then it will create a duplicate. Call
// object.Update() in this case.
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created
// The new object may have been created if an error is returned
func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
fs := &FsObjectDrive{drive: f, remote: remote}
o := &FsObjectDrive{drive: f, remote: remote}
directory, leaf := splitPath(remote)
directory, leaf := splitPath(o.remote)
directoryId, err := f.findDir(directory, true)
if err != nil {
return nil, fmt.Errorf("Couldn't find or make directory: %s", err)
return o, fmt.Errorf("Couldn't find or make directory: %s", err)
}
// Guess the mime type
mimeType := mime.TypeByExtension(path.Ext(remote))
mimeType := mime.TypeByExtension(path.Ext(o.remote))
if mimeType == "" {
mimeType = "application/octet-stream"
}
modifiedDate := modTime.Format(time.RFC3339Nano)
// Define the metadata for the file we are going to create.
info := &drive.File{
Title: leaf,
Description: leaf,
Parents: []*drive.ParentReference{{Id: directoryId}},
MimeType: mimeType,
Title: leaf,
Description: leaf,
Parents: []*drive.ParentReference{{Id: directoryId}},
MimeType: mimeType,
ModifiedDate: modifiedDate,
}
// FIXME can't set modified date on initial upload as no
// .SetModifiedDate(). This agrees with the API docs, but not
// with the comment on
// https://developers.google.com/drive/v2/reference/files/insert
//
// modifiedDate datetime Last time this file was modified by
// anyone (formatted RFC 3339 timestamp). This is only mutable
// on update when the setModifiedDate parameter is set.
// writable
//
// There is no setModifiedDate parameter though
// Make the API request to upload infodata and file data.
// Make the API request to upload metadata and file data.
info, err = f.svc.Files.Insert(info).Media(in).Do()
if err != nil {
return nil, fmt.Errorf("Upload failed: %s", err)
return o, fmt.Errorf("Upload failed: %s", err)
}
fs.setMetaData(info)
// Set modified date
info.ModifiedDate = modTime.Format(time.RFC3339Nano)
_, err = f.svc.Files.Update(info.Id, info).SetModifiedDate(true).Do()
if err != nil {
return fs, fmt.Errorf("Failed to set mtime: %s", err)
}
return fs, nil
o.setMetaData(info)
return o, nil
}
// Mkdir creates the container if it doesn't exist
@@ -886,6 +912,26 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
return res.Body, nil
}
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) error {
info := &drive.File{
Id: o.id,
ModifiedDate: modTime.Format(time.RFC3339Nano),
}
// Make the API request to upload metadata and file data.
info, err := o.drive.svc.Files.Update(info.Id, info).SetModifiedDate(true).Media(in).Do()
if err != nil {
return fmt.Errorf("Update failed: %s", err)
}
o.setMetaData(info)
return nil
}
// Remove an object
func (o *FsObjectDrive) Remove() error {
return o.drive.svc.Files.Delete(o.id).Do()

fs/fs.go

@@ -18,9 +18,15 @@ var (
// Filesystem info
type FsInfo struct {
Name string // name of this fs
NewFs func(string, string) (Fs, error) // create a new file system
Config func(string) // function to call to help with config
// Name of this fs
Name string
// Create a new file system. If root refers to an existing
// object, then it should return a Fs which only returns that
// object.
NewFs func(name string, root string) (Fs, error)
// Function to call to help with config
Config func(string)
// Options for the Fs configuration
Options []Option
}
@@ -103,6 +109,9 @@ type Object interface {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
Open() (io.ReadCloser, error)
// Update the object from in with the given modTime and size
Update(in io.Reader, modTime time.Time, size int64) error
// Storable says whether this object can be stored
Storable() bool
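
To illustrate the new split between Put and Update, here is a hypothetical helper (a sketch, not part of this diff) showing how a caller would prefer Update when the object already exists:

```
package example

import (
	"io"
	"time"

	"github.com/ncw/rclone/fs"
)

// copyTo updates an existing object in place when possible, falling
// back to Put only when the object doesn't exist yet - this avoids
// creating duplicates on backends like Google Drive.
func copyTo(f fs.Fs, in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	if obj := f.NewFsObject(remote); obj != nil {
		return obj, obj.Update(in, modTime, size)
	}
	return f.Put(in, remote, modTime, size)
}
```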

88
fs/limited.go Normal file

@@ -0,0 +1,88 @@
package fs
import (
"fmt"
"io"
"time"
)
// This defines a Limited Fs which can only return the Objects passed in, backed by the Fs passed in
type Limited struct {
objects []Object
fs Fs
}
// NewLimited makes a limited Fs limited to the objects passed in
func NewLimited(fs Fs, objects ...Object) Fs {
f := &Limited{
objects: objects,
fs: fs,
}
return f
}
// String returns a description of the FS
func (f *Limited) String() string {
return fmt.Sprintf("%s limited to %d objects", f.fs.String(), len(f.objects))
}
// List the Fs into a channel
func (f *Limited) List() ObjectsChan {
out := make(ObjectsChan, Config.Checkers)
go func() {
for _, obj := range f.objects {
out <- obj
}
close(out)
}()
return out
}
// List the Fs directories/buckets/containers into a channel
func (f *Limited) ListDir() DirChan {
out := make(DirChan, Config.Checkers)
close(out)
return out
}
// Find the Object at remote. Returns nil if it can't be found
func (f *Limited) NewFsObject(remote string) Object {
for _, obj := range f.objects {
if obj.Remote() == remote {
return obj
}
}
return nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Limited) Put(in io.Reader, remote string, modTime time.Time, size int64) (Object, error) {
obj := f.NewFsObject(remote)
if obj == nil {
return nil, fmt.Errorf("Can't create %q in limited fs", remote)
}
return obj, obj.Update(in, modTime, size)
}
// Make the directory (container, bucket)
func (f *Limited) Mkdir() error {
// All directories are already made - just ignore
return nil
}
// Remove the directory (container, bucket) if empty
func (f *Limited) Rmdir() error {
return fmt.Errorf("Can't rmdir in limited fs")
}
// Precision of the ModTimes in this Fs
func (f *Limited) Precision() time.Duration {
return f.fs.Precision()
}
// Check the interfaces are satisfied
var _ Fs = &Limited{}

fs/operations.go

@@ -77,7 +77,7 @@ func Equal(src, dst Object) bool {
if dt >= ModifyWindow || dt <= -ModifyWindow {
Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
} else {
Debug(src, "Size and modification time differ by %s (within %s)", dt, ModifyWindow)
Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
return true
}
@@ -97,8 +97,12 @@ func Equal(src, dst Object) bool {
return true
}
// Copy src object to f
func Copy(f Fs, src Object) {
// Copy src object to dst or f if nil
//
// If dst is nil then the object must not exist already. If you do
// call Copy() with dst nil on a pre-existing file then some filing
// systems (eg Drive) may duplicate the file.
func Copy(f Fs, dst, src Object) {
in0, err := src.Open()
if err != nil {
Stats.Error()
@@ -107,7 +111,14 @@ func Copy(f Fs, src Object) {
}
in := NewAccount(in0) // account the transfer
dst, err := f.Put(in, src.Remote(), src.ModTime(), src.Size())
var actionTaken string
if dst != nil {
actionTaken = "Copied (updated existing)"
err = dst.Update(in, src.ModTime(), src.Size())
} else {
actionTaken = "Copied (new)"
dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
}
inErr := in.Close()
if err == nil {
err = inErr
@@ -125,14 +136,15 @@ func Copy(f Fs, src Object) {
}
return
}
Debug(src, "Copied")
Debug(src, actionTaken)
}
// Check to see if src needs to be copied to dst and if so puts it in out
func checkOne(src, dst Object, out ObjectsChan) {
func checkOne(pair ObjectPair, out ObjectPairChan) {
src, dst := pair.src, pair.dst
if dst == nil {
Debug(src, "Couldn't find local file - download")
out <- src
Debug(src, "Couldn't find file - need to transfer")
out <- pair
return
}
// Check to see if can store this
@@ -144,77 +156,33 @@ func checkOne(src, dst Object, out ObjectsChan) {
Debug(src, "Unchanged skipping")
return
}
out <- src
out <- pair
}
// Read FsObjects on in; send to out if they need uploading
//
// FIXME potentially doing lots of MD5SUMS at once
func PairChecker(in ObjectPairChan, out ObjectsChan, wg *sync.WaitGroup) {
func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
defer wg.Done()
for pair := range in {
src := pair.src
Stats.Checking(src)
checkOne(src, pair.dst, out)
Stats.DoneChecking(src)
}
}
// Read FsObjects on in; send to out if they need uploading
//
// FIXME potentially doing lots of MD5SUMS at once
func Checker(in, out ObjectsChan, fdst Fs, wg *sync.WaitGroup) {
defer wg.Done()
for src := range in {
Stats.Checking(src)
dst := fdst.NewFsObject(src.Remote())
checkOne(src, dst, out)
checkOne(pair, out)
Stats.DoneChecking(src)
}
}
// Read FsObjects on in and copy them
func Copier(in ObjectsChan, fdst Fs, wg *sync.WaitGroup) {
func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
defer wg.Done()
for src := range in {
for pair := range in {
src := pair.src
Stats.Transferring(src)
Copy(fdst, src)
Copy(fdst, pair.dst, src)
Stats.DoneTransferring(src)
}
}
// Copies fsrc into fdst
func CopyFs(fdst, fsrc Fs) error {
err := fdst.Mkdir()
if err != nil {
Stats.Error()
return err
}
to_be_checked := fsrc.List()
to_be_uploaded := make(ObjectsChan, Config.Transfers)
var checkerWg sync.WaitGroup
checkerWg.Add(Config.Checkers)
for i := 0; i < Config.Checkers; i++ {
go Checker(to_be_checked, to_be_uploaded, fdst, &checkerWg)
}
var copierWg sync.WaitGroup
copierWg.Add(Config.Transfers)
for i := 0; i < Config.Transfers; i++ {
go Copier(to_be_uploaded, fdst, &copierWg)
}
Log(fdst, "Waiting for checks to finish")
checkerWg.Wait()
close(to_be_uploaded)
Log(fdst, "Waiting for transfers to finish")
copierWg.Wait()
return nil
}
// Delete all the files passed in the channel
func DeleteFiles(to_be_deleted ObjectsChan) {
var wg sync.WaitGroup
@@ -247,7 +215,9 @@ func DeleteFiles(to_be_deleted ObjectsChan) {
}
// Syncs fsrc into fdst
func Sync(fdst, fsrc Fs) error {
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc
func Sync(fdst, fsrc Fs, Delete bool) error {
err := fdst.Mkdir()
if err != nil {
Stats.Error()
@@ -265,7 +235,7 @@ func Sync(fdst, fsrc Fs) error {
// Read source files checking them off against dest files
to_be_checked := make(ObjectPairChan, Config.Transfers)
to_be_uploaded := make(ObjectsChan, Config.Transfers)
to_be_uploaded := make(ObjectPairChan, Config.Transfers)
var checkerWg sync.WaitGroup
checkerWg.Add(Config.Checkers)
@@ -287,8 +257,8 @@ func Sync(fdst, fsrc Fs) error {
delete(delFiles, remote)
to_be_checked <- ObjectPair{src, dst}
} else {
// No need to check doesn't exist
to_be_uploaded <- src
// No need to check since it doesn't exist
to_be_uploaded <- ObjectPair{src, nil}
}
}
close(to_be_checked)
@@ -300,20 +270,23 @@ func Sync(fdst, fsrc Fs) error {
Log(fdst, "Waiting for transfers to finish")
copierWg.Wait()
if Stats.Errored() {
Log(fdst, "Not deleting files as there were IO errors")
return nil
}
// Delete the spare files
toDelete := make(ObjectsChan, Config.Transfers)
go func() {
for _, fs := range delFiles {
toDelete <- fs
// Delete files if asked
if Delete {
if Stats.Errored() {
Log(fdst, "Not deleting files as there were IO errors")
return nil
}
close(toDelete)
}()
DeleteFiles(toDelete)
// Delete the spare files
toDelete := make(ObjectsChan, Config.Transfers)
go func() {
for _, fs := range delFiles {
toDelete <- fs
}
close(toDelete)
}()
DeleteFiles(toDelete)
}
return nil
}
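
With the new Delete parameter, copy and sync share one engine; a sketch of the two call sites (matching the rclone.go changes below):

```
package example

import (
	"log"

	"github.com/ncw/rclone/fs"
)

// copyThenSync demonstrates the unified engine: Delete=false gives the
// old CopyFs behaviour, Delete=true also removes destination files
// that are missing from the source.
func copyThenSync(fdst, fsrc fs.Fs) {
	if err := fs.Sync(fdst, fsrc, false); err != nil { // copy
		log.Fatalf("Failed to copy: %v", err)
	}
	if err := fs.Sync(fdst, fsrc, true); err != nil { // sync
		log.Fatalf("Failed to sync: %v", err)
	}
}
```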

local/local.go

@@ -45,6 +45,16 @@ type FsObjectLocal struct {
func NewFs(name, root string) (fs.Fs, error) {
root = path.Clean(root)
f := &FsLocal{root: root}
// Check to see if this points to a file
fi, err := os.Lstat(f.root)
if err == nil && fi.Mode().IsRegular() {
// It is a file, so use the parent as the root
remote := path.Base(root)
f.root = path.Dir(root)
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
}
return f, nil
}
@@ -164,30 +174,7 @@ func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64
dstPath := filepath.Join(f.root, remote)
// Temporary FsObject under construction
fs := &FsObjectLocal{local: f, remote: remote, path: dstPath}
dir := path.Dir(dstPath)
err := os.MkdirAll(dir, 0770)
if err != nil {
return fs, err
}
out, err := os.Create(dstPath)
if err != nil {
return fs, err
}
_, err = io.Copy(out, in)
outErr := out.Close()
if err != nil {
return fs, err
}
if outErr != nil {
return fs, outErr
}
// Set the mtime
fs.SetModTime(modTime)
return fs, err
return fs, fs.Update(in, modTime, size)
}
// Mkdir creates the directory if it doesn't exist
@@ -335,6 +322,33 @@ func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
return
}
// Update the object from in with modTime and size
func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) error {
dir := path.Dir(o.path)
err := os.MkdirAll(dir, 0770)
if err != nil {
return err
}
out, err := os.Create(o.path)
if err != nil {
return err
}
_, err = io.Copy(out, in)
outErr := out.Close()
if err != nil {
return err
}
if outErr != nil {
return outErr
}
// Set the mtime
o.SetModTime(modTime)
return nil
}
// Stat a FsObject into info
func (o *FsObjectLocal) lstat() error {
info, err := os.Lstat(o.path)

notes.md

@@ -1,7 +1,4 @@
Todo
* Make a test suite which can run on all the given types of fs
* Copy should use the sync code as it is more efficient at directory listing
* FIXME: ls without an argument for buckets/containers?
* FIXME: More -dry-run checks for object transfer
* Might be quicker to check md5sums first? for swift <-> swift certainly, and maybe for small files
* swift: Ignoring the pseudo directories
@@ -14,7 +11,6 @@ Todo
* make Account do progress meter
* Make logging controllable with flags (mostly done)
* -timeout: Make all timeouts be settable with command line parameters
* Check the locking in swift module!
* Windows paths? Do we need to translate / and \?
* Make a fs.Errorf and count errors and log them at a different level
* Add max object size to fs metadata - 5GB for swift, infinite for local, ? for s3
@@ -24,18 +20,20 @@ Ideas
* could do encryption - put IV into metadata?
* optimise remote copy container to another container using remote
copy if local is same as remote - use an optional Copier interface
* Allow subpaths container:/sub/path
* support
* sftp
* scp
* Google cloud storage: https://developers.google.com/storage/
* rsync over ssh
* dropbox: https://github.com/nickoneill/go-dropbox (no MD5s)
* control times sync (which is slow) with -a --archive flag?
Need to make directory objects otherwise can't upload an empty directory
* Or could upload empty directories only?
* Can't purge a local filesystem because it leaves the directories behind
Copying a single file? Or maybe with a glob pattern? Could do with LimitedFs
s3
* Can maybe set last modified?
* https://forums.aws.amazon.com/message.jspa?messageID=214062
@@ -43,5 +41,24 @@ s3
* Returns etag and last modified in bucket list
Bugs
* Non verbose - not sure number transferred got counted up? CHECK
* When doing copy it recurses the whole of the destination FS which isn't necessary
Non verbose - not sure number transferred got counted up? CHECK
Making a release
* go build ./...
* cd rclonetest
* go build
* ./rclonetest memstore:
* ./rclonetest s3:
* ./rclonetest drive2:
* ./rclonetest /tmp/z
* cd ..
* make tag
* edit README.md Changelog
* git commit version.go rclonetest/version.go README.md docs/content/downloads.md
* make retag
* . ~/bin/go-cross
* make cross
* make upload
* make upload_website
* git push --tags

rclone.go

@@ -27,6 +27,7 @@ var (
// Flags
cpuprofile = pflag.StringP("cpuprofile", "", "", "Write cpu profile to file")
statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats")
version = pflag.BoolP("version", "V", false, "Print the version number")
)
type Command struct {
@@ -61,7 +62,7 @@ var Commands = []Command{
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.CopyFs(fdst, fsrc)
err := fs.Sync(fdst, fsrc, false)
if err != nil {
log.Fatalf("Failed to copy: %v", err)
}
@@ -79,7 +80,7 @@ var Commands = []Command{
exist in destination. Since this can cause data loss, test
first with the --dry-run flag.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Sync(fdst, fsrc)
err := fs.Sync(fdst, fsrc, true)
if err != nil {
log.Fatalf("Failed to sync: %v", err)
}
@@ -105,7 +106,7 @@ var Commands = []Command{
Name: "lsd",
ArgsHelp: "[remote://path]",
Help: `
List all directoryes/objects/buckets in the the path.`,
List all directories/containers/buckets in the path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.ListDir(fdst)
if err != nil {
@@ -193,13 +194,13 @@ var Commands = []Command{
// syntaxError prints the syntax
func syntaxError() {
fmt.Fprintf(os.Stderr, `Sync files and directories to and from local and remote object stores
fmt.Fprintf(os.Stderr, `Sync files and directories to and from local and remote object stores - %s.
Syntax: [options] subcommand <parameters> <parameters...>
Subcommands:
`)
`, Version)
for i := range Commands {
cmd := &Commands[i]
fmt.Fprintf(os.Stderr, " %s %s\n", cmd.Name, cmd.ArgsHelp)
@@ -299,6 +300,10 @@ func StartStats() {
func main() {
ParseFlags()
if *version {
fmt.Printf("rclone %s\n", Version)
os.Exit(0)
}
command, args := ParseCommand()
// Make source and destination fs

rclonetest/rclonetest.go

@@ -26,6 +26,7 @@ import (
// Globals
var (
localName, remoteName string
version = pflag.BoolP("version", "V", false, "Print the version number")
)
// Represents an item for checking
@@ -150,7 +151,7 @@ var t2 = Time("2011-12-25T12:59:59.123456789Z")
func TestCopy(flocal, fremote fs.Fs) {
WriteFile("empty", "", t1)
err := fs.CopyFs(fremote, flocal)
err := fs.Sync(fremote, flocal, false)
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
@@ -168,7 +169,7 @@ func TestSync(flocal, fremote fs.Fs) {
if err != nil {
log.Fatalf("Chtimes failed: %v", err)
}
err = fs.Sync(fremote, flocal)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
@@ -182,7 +183,7 @@ func TestSync(flocal, fremote fs.Fs) {
log.Printf("Sync after adding a file")
WriteFile("potato", "------------------------------------------------------------", t1)
err = fs.Sync(fremote, flocal)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
@@ -197,7 +198,7 @@ func TestSync(flocal, fremote fs.Fs) {
log.Printf("Sync after changing a file's size only")
WriteFile("potato", "smaller but same date", t1)
err = fs.Sync(fremote, flocal)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
@@ -215,7 +216,7 @@ func TestSync(flocal, fremote fs.Fs) {
if err != nil {
log.Fatalf("Remove failed: %v", err)
}
err = fs.Sync(fremote, flocal)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
@@ -257,7 +258,7 @@ func TestRmdir(flocal, fremote fs.Fs) {
}
func syntaxError() {
fmt.Fprintf(os.Stderr, `Test rclone with a remote to find bugs in either.
fmt.Fprintf(os.Stderr, `Test rclone with a remote to find bugs in either - %s.
Syntax: [options] remote:
@@ -266,7 +267,7 @@ directory under it and perform tests on it, deleting it at the end.
Options:
`)
`, Version)
pflag.PrintDefaults()
}
@@ -282,6 +283,10 @@ func cleanTempDir() {
func main() {
pflag.Usage = syntaxError
pflag.Parse()
if *version {
fmt.Printf("rclonetest %s\n", Version)
os.Exit(0)
}
fs.LoadConfig()
rand.Seed(time.Now().UnixNano())
args := pflag.Args()

3
rclonetest/version.go Normal file

@@ -0,0 +1,3 @@
package main
const Version = "v0.97"

178
s3/s3.go

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"log"
"mime"
"net/http"
"path"
@@ -111,6 +110,7 @@ type FsS3 struct {
b *s3.Bucket // the connection to the bucket
bucket string // the bucket we are working on
perm s3.ACL // permissions for new buckets / objects
root string // root of the bucket - ignore all objects above this
}
// FsObjectS3 describes a s3 object
@@ -131,7 +131,10 @@ type FsObjectS3 struct {
// String converts this FsS3 to a string
func (f *FsS3) String() string {
return fmt.Sprintf("S3 bucket %s", f.bucket)
if f.root == "" {
return fmt.Sprintf("S3 bucket %s", f.bucket)
}
return fmt.Sprintf("S3 bucket %s path %s", f.bucket, f.root)
}
// Pattern to match a s3 path
@@ -185,14 +188,11 @@ func s3Connection(name string) (*s3.S3, error) {
}
// NewFsS3 constructs an FsS3 from the path, bucket:path
func NewFs(name, path string) (fs.Fs, error) {
bucket, directory, err := s3ParsePath(path)
func NewFs(name, root string) (fs.Fs, error) {
bucket, directory, err := s3ParsePath(root)
if err != nil {
return nil, err
}
if directory != "" {
return nil, fmt.Errorf("Directories not supported yet in %q: %q", path, directory)
}
c, err := s3Connection(name)
if err != nil {
return nil, err
@@ -202,6 +202,24 @@ func NewFs(name, path string) (fs.Fs, error) {
bucket: bucket,
b: c.Bucket(bucket),
perm: s3.Private, // FIXME need user to specify
root: directory,
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
_, err = f.b.Head(directory, nil)
if err == nil {
remote := path.Base(directory)
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
}
}
return f, nil
}
@@ -241,48 +259,100 @@ func (f *FsS3) NewFsObject(remote string) fs.Object {
return f.NewFsObjectWithInfo(remote, nil)
}
// Walk the path returning a channel of FsObjects
func (f *FsS3) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
// FIXME need to implement ALL loop
objects, err := f.b.List("", "", "", 10000)
if err != nil {
fs.Stats.Error()
log.Printf("Couldn't read bucket %q: %s", f.bucket, err)
// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
delimiter := ""
if directories {
delimiter = "/"
}
// FIXME need to implement ALL loop
objects, err := f.b.List(f.root, delimiter, "", 10000)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
} else {
rootLength := len(f.root)
if directories {
for _, remote := range objects.CommonPrefixes {
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote := remote[rootLength:]
fn(remote, &s3.Key{Key: remote})
}
} else {
for i := range objects.Contents {
object := &objects.Contents[i]
if fs := f.NewFsObjectWithInfo(object.Key, object); fs != nil {
out <- fs
if !strings.HasPrefix(object.Key, f.root) {
fs.Log(f, "Odd name received %q", object.Key)
continue
}
remote := object.Key[rootLength:]
fn(remote, object)
}
}
}
}
// Walk the path returning a channel of FsObjects
func (f *FsS3) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
if f.bucket == "" {
// Return no objects at top level list
close(out)
}()
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
} else {
go func() {
defer close(out)
f.list(false, func(remote string, object *s3.Key) {
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
}()
}
return out
}
// Lists the buckets
func (f *FsS3) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
buckets, err := f.c.ListBuckets()
if err != nil {
fs.Stats.Error()
log.Printf("Couldn't list buckets: %s", err)
} else {
for _, bucket := range buckets {
out <- &fs.Dir{
Name: bucket.Name,
When: bucket.CreationDate,
Bytes: -1,
Count: -1,
if f.bucket == "" {
// List the buckets
go func() {
defer close(out)
buckets, err := f.c.ListBuckets()
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list buckets: %s", err)
} else {
for _, bucket := range buckets {
out <- &fs.Dir{
Name: bucket.Name,
When: bucket.CreationDate,
Bytes: -1,
Count: -1,
}
}
}
}
}()
}()
} else {
// List the directories in the path in the bucket
go func() {
defer close(out)
f.list(true, func(remote string, object *s3.Key) {
out <- &fs.Dir{
Name: remote,
Bytes: object.Size,
Count: 0,
}
})
}()
}
return out
}
@@ -290,20 +360,7 @@ func (f *FsS3) ListDir() fs.DirChan {
func (f *FsS3) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
fs := &FsObjectS3{s3: f, remote: remote}
// Set the mtime in the headers
headers := s3.Headers{
metaMtime: swift.TimeToFloatString(modTime),
}
// Guess the content type
contentType := mime.TypeByExtension(path.Ext(remote))
if contentType == "" {
contentType = "application/octet-stream"
}
_, err := fs.s3.b.PutReaderHeaders(remote, in, size, contentType, f.perm, headers)
return fs, err
return fs, fs.Update(in, modTime, size)
}
// Mkdir creates the bucket if it doesn't exist
@@ -367,7 +424,7 @@ func (o *FsObjectS3) readMetaData() (err error) {
return nil
}
headers, err := o.s3.b.Head(o.remote, nil)
headers, err := o.s3.b.Head(o.s3.root+o.remote, nil)
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
@@ -420,7 +477,7 @@ func (o *FsObjectS3) SetModTime(modTime time.Time) {
return
}
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
_, err = o.s3.b.Update(o.remote, o.s3.perm, o.meta)
_, err = o.s3.b.Update(o.s3.root+o.remote, o.s3.perm, o.meta)
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
@@ -434,13 +491,30 @@ func (o *FsObjectS3) Storable() bool {
// Open an object for read
func (o *FsObjectS3) Open() (in io.ReadCloser, err error) {
in, err = o.s3.b.GetReader(o.remote)
in, err = o.s3.b.GetReader(o.s3.root + o.remote)
return
}
// Update the Object from in with modTime and size
func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
// Set the mtime in the headers
headers := s3.Headers{
metaMtime: swift.TimeToFloatString(modTime),
}
// Guess the content type
contentType := mime.TypeByExtension(path.Ext(o.remote))
if contentType == "" {
contentType = "application/octet-stream"
}
_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, contentType, o.s3.perm, headers)
return err
}
// Remove an object
func (o *FsObjectS3) Remove() error {
return o.s3.b.Del(o.remote)
return o.s3.b.Del(o.s3.root + o.remote)
}
// Check the interfaces are satisfied

swift/swift.go

@@ -1,13 +1,11 @@
// Swift interface
package swift
// FIXME need to prevent anything but ListDir working for swift://
import (
"errors"
"fmt"
"io"
"log"
"path"
"regexp"
"strings"
"time"
@@ -73,7 +71,10 @@ type FsObjectSwift struct {
// String converts this FsSwift to a string
func (f *FsSwift) String() string {
return fmt.Sprintf("Swift container %s", f.container)
if f.root == "" {
return fmt.Sprintf("Swift container %s", f.container)
}
return fmt.Sprintf("Swift container %s path %s", f.container, f.root)
}
// Pattern to match a swift path
@@ -118,19 +119,37 @@ func swiftConnection(name string) (*swift.Connection, error) {
}
// NewFs constructs an FsSwift from the path, container:path
func NewFs(name, path string) (fs.Fs, error) {
container, directory, err := parsePath(path)
func NewFs(name, root string) (fs.Fs, error) {
container, directory, err := parsePath(root)
if err != nil {
return nil, err
}
if directory != "" {
return nil, fmt.Errorf("Directories not supported yet in %q", path)
}
c, err := swiftConnection(name)
if err != nil {
return nil, err
}
f := &FsSwift{c: *c, container: container, root: directory}
f := &FsSwift{
c: *c,
container: container,
root: directory,
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
_, _, err = f.c.Object(container, directory)
if err == nil {
remote := path.Base(directory)
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
}
}
return f, nil
}
@@ -162,68 +181,112 @@ func (f *FsSwift) NewFsObject(remote string) fs.Object {
return f.NewFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsSwift) list(directories bool, fn func(string, *swift.Object)) {
// Options for ObjectsWalk
opts := swift.ObjectsOpts{
Prefix: f.root,
Limit: 256,
}
if directories {
opts.Delimiter = '/'
}
rootLength := len(f.root)
err := f.c.ObjectsWalk(f.container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
objects, err := f.c.Objects(f.container, opts)
if err == nil {
for i := range objects {
object := &objects[i]
// FIXME if there are no directories, swift gives back the files for some reason!
if directories && !strings.HasSuffix(object.Name, "/") {
continue
}
if !strings.HasPrefix(object.Name, f.root) {
fs.Log(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
fn(remote, object)
}
}
return objects, err
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read container %q: %s", f.container, err)
}
}
// Walk the path returning a channel of FsObjects
func (f *FsSwift) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
// FIXME use a smaller limit?
err := f.c.ObjectsWalk(f.container, nil, func(opts *swift.ObjectsOpts) (interface{}, error) {
objects, err := f.c.Objects(f.container, opts)
if err == nil {
for i := range objects {
object := &objects[i]
if fs := f.NewFsObjectWithInfo(object.Name, object); fs != nil {
out <- fs
}
}
}
return objects, err
})
if err != nil {
fs.Stats.Error()
log.Printf("Couldn't read container %q: %s", f.container, err)
}
if f.container == "" {
// Return no objects at top level list
close(out)
}()
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a container using lsd")
} else {
// List the objects
go func() {
defer close(out)
f.list(false, func(remote string, object *swift.Object) {
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
}()
}
return out
}
// Lists the containers
func (f *FsSwift) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
containers, err := f.c.ContainersAll(nil)
if err != nil {
fs.Stats.Error()
log.Printf("Couldn't list containers: %s", err)
} else {
for _, container := range containers {
out <- &fs.Dir{
Name: container.Name,
Bytes: container.Bytes,
Count: container.Count,
if f.container == "" {
// List the containers
go func() {
defer close(out)
containers, err := f.c.ContainersAll(nil)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list containers: %v", err)
} else {
for _, container := range containers {
out <- &fs.Dir{
Name: container.Name,
Bytes: container.Bytes,
Count: container.Count,
}
}
}
}
}()
}()
} else {
// List the directories in the path in the container
go func() {
defer close(out)
f.list(true, func(remote string, object *swift.Object) {
out <- &fs.Dir{
Name: remote,
Bytes: object.Bytes,
Count: 0,
}
})
}()
}
return out
}
// Put the FsObject into the container
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created
// The new object may have been created if an error is returned
func (f *FsSwift) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
fs := &FsObjectSwift{swift: f, remote: remote}
// Set the mtime
m := swift.Metadata{}
m.SetModTime(modTime)
_, err := f.c.ObjectPut(f.container, remote, in, true, "", "", m.ObjectHeaders())
return fs, err
return fs, fs.Update(in, modTime, size)
}
// Mkdir creates the container if it doesn't exist
@@ -280,7 +343,7 @@ func (o *FsObjectSwift) readMetaData() (err error) {
if o.meta != nil {
return nil
}
info, h, err := o.swift.c.Object(o.swift.container, o.remote)
info, h, err := o.swift.c.Object(o.swift.container, o.swift.root+o.remote)
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
@@ -319,7 +382,7 @@ func (o *FsObjectSwift) SetModTime(modTime time.Time) {
return
}
o.meta.SetModTime(modTime)
err = o.swift.c.ObjectUpdate(o.swift.container, o.remote, o.meta.ObjectHeaders())
err = o.swift.c.ObjectUpdate(o.swift.container, o.swift.root+o.remote, o.meta.ObjectHeaders())
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
@@ -333,13 +396,24 @@ func (o *FsObjectSwift) Storable() bool {
// Open an object for read
func (o *FsObjectSwift) Open() (in io.ReadCloser, err error) {
in, _, err = o.swift.c.ObjectOpen(o.swift.container, o.remote, true, nil)
in, _, err = o.swift.c.ObjectOpen(o.swift.container, o.swift.root+o.remote, true, nil)
return
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectSwift) Update(in io.Reader, modTime time.Time, size int64) error {
// Set the mtime
m := swift.Metadata{}
m.SetModTime(modTime)
_, err := o.swift.c.ObjectPut(o.swift.container, o.swift.root+o.remote, in, true, "", "", m.ObjectHeaders())
return err
}
// Remove an object
func (o *FsObjectSwift) Remove() error {
return o.swift.c.ObjectDelete(o.swift.container, o.remote)
return o.swift.c.ObjectDelete(o.swift.container, o.swift.root+o.remote)
}
// Check the interfaces are satisfied

3
version.go Normal file

@@ -0,0 +1,3 @@
package main
const Version = "v0.97"