mirror of https://github.com/rclone/rclone.git synced 2025-12-15 15:53:41 +00:00

Compare commits


51 Commits
v0.98 ... v1.03

Author SHA1 Message Date
Nick Craig-Wood
54cd46372a Version 1.03 2014-07-20 11:28:50 +01:00
Nick Craig-Wood
282cba20a0 swift, s3, dropbox: fix metadata read on Update()
This was causing changed files to be marked as corrupted on upload
2014-07-20 11:23:05 +01:00
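The mechanism, sketched below with illustrative names rather than the actual swift/s3/dropbox code: Copy() now verifies sizes (and md5sums) after each transfer, so a remote that kept its pre-upload metadata cached would report a stale size and get flagged as corrupted. Reading the metadata back after Update() fixes that.

```
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// Illustrative stand-in for a remote object - not the actual code.
type object struct {
	size int64 // cached metadata
	data []byte
}

func (o *object) Size() int64 { return o.size }

func (o *object) Update(in io.Reader) error {
	data, err := ioutil.ReadAll(in)
	if err != nil {
		return err
	}
	o.data = data
	// Before this fix the remotes returned here with o.size stale,
	// so Copy()'s new size verification compared the old cached size
	// against the source and marked the changed file as corrupted.
	// The fix: read the metadata back after the upload.
	o.size = int64(len(o.data))
	return nil
}

func main() {
	o := &object{size: 5, data: []byte("hello")}
	if err := o.Update(bytes.NewReader([]byte("hello world"))); err != nil {
		panic(err)
	}
	fmt.Println(o.Size()) // 11 - now matches what the source reports
}
```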
Nick Craig-Wood
2479ce2c8e dropbox: go1.1 compatibility 2014-07-19 15:48:40 +01:00
Nick Craig-Wood
9aa4b6bd9b Version 1.02 2014-07-19 13:24:48 +01:00
Nick Craig-Wood
6c10024420 rclonetest: add --subdir flag for testing with a sub directory
Also add a test script for testing all the remotes
2014-07-19 13:07:56 +01:00
Nick Craig-Wood
e559194fb2 fs: Verify sizes are the same after transfer in Copy() 2014-07-19 13:05:07 +01:00
Nick Craig-Wood
1c472348b6 s3: Read metadata after Update or Put 2014-07-19 13:05:07 +01:00
Nick Craig-Wood
5a8bce6353 swift: Read metadata after Update or Put 2014-07-19 13:05:06 +01:00
Nick Craig-Wood
f9b31591f9 drive: Flush directory cache on Purge 2014-07-19 13:05:06 +01:00
Nick Craig-Wood
1527e64ee7 local: Implement Purger interface 2014-07-19 13:05:01 +01:00
Nick Craig-Wood
f7652db4f1 local: Make sure info is never nil 2014-07-19 11:50:11 +01:00
Nick Craig-Wood
8b75fb14c5 local: calculate md5sum on Read or Update since we check it in Copy() 2014-07-19 11:06:25 +01:00
Nick Craig-Wood
07f9a1a9f0 Update docs for Dropbox and Google Cloud Storage 2014-07-18 10:10:08 +01:00
Nick Craig-Wood
7d8bac2711 google cloud storage: fix merge conflict
Conflicts:
	rclone.go
	rclonetest/rclonetest.go
2014-07-16 12:21:01 +01:00
Nick Craig-Wood
cad9479a00 google cloud storage: Update metadata on Put since we get it back 2014-07-16 12:12:36 +01:00
Nick Craig-Wood
dfc8a375f6 dropbox: Switch to using RFC3339 for time metadata 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
7c9bdb4b7a dropbox: make limited fs work (copy single file) 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
f8bb0d9cc8 dropbox: remove metadata when we remove files 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
b185e104ed dropbox: Fix mkdir on already created directory 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
e57a4c7c0c dropbox: open the datastore in the background 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
d2f187e1a1 dropbox: Use /delta to list objects - much quicker
Also fix major performance problem - re-reading entry each time!
2014-07-15 19:27:42 +01:00
Nick Craig-Wood
c9aca33030 dropbox: Fix concurrent access to Dropbox datastore and Lower case keys in datastore 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
2b0911531c dropbox: basics of metadata in Dropbox datastore working 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
2149185fc2 dropbox: Initial support of full Fs interface
Still missing metadata support (eg SetModTime)
2014-07-15 19:27:42 +01:00
Nick Craig-Wood
0159da9f37 dropbox: graphics for the Dropbox app (used in auth process) 2014-07-15 19:27:41 +01:00
Nick Craig-Wood
680283d69f google cloud storage: fix download of files in sub directories 2014-07-15 19:27:31 +01:00
Nick Craig-Wood
c71f339e01 google cloud storage: implement ACLs and delete 2014-07-15 19:27:31 +01:00
Nick Craig-Wood
c91c96565f google cloud storage: set the Content-Type from the file name 2014-07-15 19:27:31 +01:00
Nick Craig-Wood
b72fc69fbe google cloud storage: Make operations on single files work 2014-07-15 19:27:30 +01:00
Nick Craig-Wood
a1732c21d8 google cloud storage: Initial support for full Fs interface 2014-07-15 19:27:30 +01:00
Nick Craig-Wood
b83441081c drive: factor common authentication code into googleauth module
In preparation for Google Cloud Storage support
2014-07-15 19:27:30 +01:00
Nick Craig-Wood
8a76568ea8 core: Verify MD5 sums after each transfer 2014-07-15 19:27:05 +01:00
Nick Craig-Wood
c4dc9d273a rclonetest: check sub directory and downloads 2014-07-15 13:28:48 +01:00
Nick Craig-Wood
66cf2df780 drive: check errors in Open() better 2014-07-15 13:28:35 +01:00
Nick Craig-Wood
c1a245d1c8 Factor UserAgent to fs and move Version to fs 2014-07-13 19:19:58 +01:00
Nick Craig-Wood
e40b09fe61 drive: Fix comment 2014-07-13 10:54:35 +01:00
Nick Craig-Wood
eb2b4ea8aa rclone: Don't purge if --dry-run set 2014-07-13 10:54:30 +01:00
Nick Craig-Wood
e055ed0489 rclone: change "ls" and add "lsl" and "md5sum" commands
Changed "ls" command not to show modification time by default only
size and path.  That is because it is slow for nearly all the remotes
as it requires extra metadata lookup.  All remotes can look up files
and sizes without extra operations.

Added "lsl" which does what "ls" used to - namely show modification
time, size and path.

Added "md5sum" which produces the same output as the md5sum command -
md5sums and paths that is.
2014-07-12 12:09:20 +01:00
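The three output formats come from the fs/operations.go diff further down; the size, path, time and md5sum values here are made-up samples for illustration.

```
package main

import (
	"fmt"
	"time"
)

func main() {
	size, remote := int64(60295), "pub/photos/test.jpg"
	modTime := time.Date(2014, 7, 12, 12, 9, 20, 0, time.UTC)
	md5sum := "d41d8cd98f00b204e9800998ecf8427e"

	// "ls" - size and path only (no extra metadata lookup needed)
	fmt.Printf("%9d %s\n", size, remote)
	// "lsl" - size, modification time and path
	fmt.Printf("%9d %19s %s\n", size, modTime.Format("2006-01-02 15:04:05.00000000"), remote)
	// "md5sum" - same layout as the standard md5sum tool
	fmt.Printf("%32s %s\n", md5sum, remote)
}
```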
Nick Craig-Wood
dd6d7cad3a Notes about storage systems 2014-07-09 20:50:08 +01:00
Nick Craig-Wood
37b2274e10 Version 1.01 2014-07-04 21:15:27 +01:00
Nick Craig-Wood
91cfbd4146 drive: fix transfer of big files using up lots of memory - fixes #5
This was done by making a seekWrapper which wraps an io.Reader with a
basic Seek for code.google.com/p/google-api-go-client/googleapi to
detect the length.  Without this the getReaderSize function reads the
entire file into memory to find its length.
2014-07-04 17:17:21 +01:00
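A sketch of the trick under the assumptions stated in the message: the wrapper satisfies io.Seeker just well enough for a length probe (seek to the end, report the known size) without buffering anything. The real seekWrapper is in the drive/drive.go diff below.

```
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// A wrapper that answers length probes from an already-known size,
// without supporting real seeking.
type sizer struct {
	in   io.Reader
	size int64
}

func (s *sizer) Read(p []byte) (int, error) { return s.in.Read(p) }

func (s *sizer) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case os.SEEK_CUR:
		return 0, nil
	case os.SEEK_END:
		return s.size, nil
	}
	return 0, nil
}

// How a library can detect the length when given an io.ReadSeeker,
// instead of reading the whole stream into memory.
func readerSize(r io.ReadSeeker) int64 {
	n, _ := r.Seek(0, os.SEEK_END)
	return n
}

func main() {
	const body = "some large upload"
	fmt.Println(readerSize(&sizer{in: strings.NewReader(body), size: int64(len(body))})) // 17
}
```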
Nick Craig-Wood
d4817399ff Fix release procedure 2014-07-03 22:01:25 +01:00
Nick Craig-Wood
48d259da68 Version 1.00 2014-07-03 21:56:54 +01:00
Nick Craig-Wood
f86fa6a062 Fix make tag 2014-07-03 21:43:14 +01:00
Nick Craig-Wood
93cb0a47e4 drive: fix whole second dates - fixes #4 2014-07-03 21:32:01 +01:00
Nick Craig-Wood
a12760c038 Travis build correction - fixes #2 2014-06-26 19:05:48 +01:00
Nick Craig-Wood
fdcd6a3a4c Add travis build file 2014-06-26 18:23:54 +01:00
Nick Craig-Wood
cb7891f4ff Fix retag 2014-06-26 17:57:32 +01:00
Nick Craig-Wood
8f2684fa70 Version 0.99 2014-06-26 17:49:26 +01:00
Nick Craig-Wood
7ebf48ef42 Fix --dry-run not working and add tests for it - fixes #3 2014-06-26 15:33:06 +01:00
Nick Craig-Wood
1d67b014cb Make compatible with go 1.1 - fixes #1 2014-06-26 15:18:48 +01:00
33 changed files with 2257 additions and 255 deletions

.travis.yml Normal file

@@ -0,0 +1,11 @@
language: go
go:
- 1.1.2
- 1.2.2
- 1.3
- tip
script:
- go get ./...
- go test -v ./...

Makefile

@@ -1,10 +1,14 @@
TAG := $(shell git describe --tags)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = "v" . $$_')
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
rclone: *.go */*.go
rclone:
@go version
go build
go install -v ./...
test: rclone
go test ./...
rclonetest/test.sh
doc: rclone.1 README.html README.txt
@@ -19,7 +23,7 @@ README.txt: README.md
install: rclone
install -d ${DESTDIR}/usr/bin
install -t ${DESTDIR}/usr/bin rclone
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
clean:
go clean ./...
@@ -31,10 +35,10 @@ website:
cd docs && hugo
upload_website: website
./rclone -v sync docs/public memstore:www-rclone-org
rclone -v sync docs/public memstore:www-rclone-org
upload:
./rclone -v copy build/ memstore:downloads-rclone-org
rclone -v copy build/ memstore:downloads-rclone-org
cross: doc
./cross-compile $(TAG)
@@ -45,17 +49,15 @@ serve:
tag:
@echo "Old tag is $(LAST_TAG)"
@echo "New tag is $(NEW_TAG)"
echo -e "package main\n const Version = \"$(NEW_TAG)\"\n" | gofmt > version.go
cp -av version.go rclonetest/version.go
echo -e "package fs\n const Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
git tag $(NEW_TAG)
@echo "Add this to changelog in README.md"
@echo " * $(NEW_TAG) - " `date -I`
@echo " * $(NEW_TAG) -" `date -I`
@git log $(LAST_TAG)..$(NEW_TAG) --oneline
@echo "Then commit the changes"
@echo git commit -m "Version $(NEW_TAG)" -a -v
@echo "And finally run make retag before make cross etc"
retag:
echo git tag -f $(LAST_TAG)
git tag -f $(LAST_TAG)
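The NEW_TAG change above is the subtle part of this hunk: with plain string concatenation, v0.99 + 0.01 becomes "v1" rather than "v1.00". A Go sketch of the same rule, to show why the sprintf("v%.2f") form is needed:

```
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// bumpTag mirrors the Makefile's NEW_TAG rule: strip the "v", add
// 0.01, print with two decimal places.
func bumpTag(lastTag string) string {
	n, _ := strconv.ParseFloat(strings.TrimPrefix(lastTag, "v"), 64)
	return fmt.Sprintf("v%.2f", n+0.01)
}

func main() {
	fmt.Println(bumpTag("v1.02")) // v1.03
	// With the old `"v" . $_` concatenation this case produced "v1":
	fmt.Println(bumpTag("v0.99")) // v1.00
}
```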

README.md

@@ -1,6 +1,6 @@
% rclone(1) User Manual
% Nick Craig-Wood
% Apr 24, 2014
% Jul 7, 2014
Rclone
======
@@ -12,6 +12,8 @@ Rclone is a command line program to sync files and directories to and from
* Google Drive
* Amazon S3
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* The local filesystem
Features
@@ -86,7 +88,11 @@ first with the `--dry-run` flag.
rclone ls [remote:path]
List all the objects in the path.
List all the objects in the path with sizes.
rclone lsl [remote:path]
List all the objects in the path with sizes and timestamps.
rclone lsd [remote:path]
@@ -111,6 +117,11 @@ Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
rclone md5sum remote:path
Produces an md5sum file for all the objects in the path. This is in
the same format as the standard md5sum tool produces.
General options:
```
@@ -173,7 +184,7 @@ The modified time is stored as metadata on the object as
Google drive
------------
Paths are specified as drive:path Drive paths may be as deep as required.
Paths are specified as remote:path Drive paths may be as deep as required.
The initial setup for drive involves getting a token from Google drive
which you need to do in your browser. `rclone config` walks you
@@ -181,9 +192,45 @@ through it.
To copy a local directory to a drive directory called backup
rclone copy /home/source drv:backup
rclone copy /home/source remote:backup
Google drive stores modification times accurate to 1 ms.
Google drive stores modification times accurate to 1 ms natively.
Dropbox
-------
Paths are specified as remote:path Dropbox paths may be as deep as required.
The initial setup for dropbox involves getting a token from Dropbox
which you need to do in your browser. `rclone config` walks you
through it.
To copy a local directory to a drive directory called backup
rclone copy /home/source dropbox:backup
Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone". Dropbox datastores are limited
to 100,000 rows so this is the maximum number of files rclone can
manage on Dropbox.
Google Cloud Storage
--------------------
Paths are specified as remote:path Google Cloud Storage paths may be
as deep as required.
The initial setup for Google Cloud Storage involves getting a token
from Google which you need to do in your browser. `rclone config`
walks you through it.
To copy a local directory to a google cloud storage directory called backup
rclone copy /home/source remote:backup
Google cloud storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in
RFC3339 format accurate to 1ns.
Single file copies
------------------
@@ -214,7 +261,23 @@ Bugs
Changelog
---------
* v1.03 - 2014-07-20
* swift, s3, dropbox: fix updated files being marked as corrupted
* Make compile with go 1.1 again
* v1.02 - 2014-07-19
* Implement Dropbox remote
* Implement Google Cloud Storage remote
* Verify Md5sums and Sizes after copies
* Remove times from "ls" command - lists sizes only
* Add "lsl" - lists times and sizes
* Add "md5sum" command
* v1.01 - 2014-07-04
* drive: fix transfer of big files using up lots of memory
* v1.00 - 2014-07-03
* drive: fix whole second dates
* v0.99 - 2014-06-26
* Fix --dry-run not working
* Make compatible with go 1.1
* v0.98 - 2014-05-30
* s3: Treat missing Content-Length as 0 for some ceph installations
* rclonetest: add file with a space in
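Both new remotes store timestamps as RFC3339 with nanosecond precision. The sources in this compare define a fixed-width output layout (RFC3339Out) alongside time.RFC3339 for parsing, presumably so trailing zeros are kept and values round-trip exactly; a small sketch:

```
package main

import (
	"fmt"
	"time"
)

// RFC3339Out as defined in the drive, dropbox and google cloud
// storage sources in this compare.
const RFC3339Out = "2006-01-02T15:04:05.000000000Z07:00"

func main() {
	t := time.Date(2014, 7, 20, 11, 28, 50, 123456789, time.UTC)
	out := t.Format(RFC3339Out)
	fmt.Println(out) // 2014-07-20T11:28:50.123456789Z

	// time.RFC3339 (RFC3339In in the sources) parses it back exactly.
	back, _ := time.Parse(time.RFC3339, out)
	fmt.Println(back.Equal(t)) // true
}
```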

docs/config.json

@@ -5,7 +5,7 @@
"menu": "menu"
},
"baseurl": "http://rclone.org",
"title": "rclone - rsync for object storage",
"description": "rclone - rsync for object storage: google drive, s3, swift, cloudfiles, memstore...",
"title": "rclone - rsync for cloud storage",
"description": "rclone - rsync for cloud storage: google drive, s3, swift, cloudfiles, dropbox, memstore...",
"canonifyurls": true
}

docs/content/about.md

@@ -1,8 +1,8 @@
---
title: "Rclone"
description: "rclone syncs files to and from Google Drive, S3, Swift and Cloudfiles."
description: "rclone syncs files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox and Google Cloud Storage."
type: page
date: "2014-04-26"
date: "2014-07-17"
groups: ["about"]
---
@@ -16,6 +16,8 @@ Rclone is a command line program to sync files and directories to and from
* Google Drive
* Amazon S3
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* The local filesystem
Features

docs/content/docs.md

@@ -1,7 +1,7 @@
---
title: "Documentation"
description: "Rclone Documentation"
date: "2014-04-26"
date: "2014-07-17"
---
Install
@@ -71,11 +71,15 @@ first with the -dry-run flag.
rclone ls [remote:path]
List all the objects in the path.
List all the objects in the path with sizes.
rclone lsl [remote:path]
List all the objects in the path with sizes and timestamps.
rclone lsd [remote:path]
List all directoryes/objects/buckets in the path.
List all directories/objects/buckets in the path.
rclone mkdir remote:path
@@ -96,6 +100,10 @@ Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
rclone md5sum remote:path
Produces an md5sum file for all the objects in the path. This is in
the same format as the standard md5sum tool produces.
General options:
```

docs/content/downloads.md

@@ -2,34 +2,34 @@
title: "Rclone downloads"
description: "Download rclone binaries for your OS."
type: page
date: "2014-05-30"
date: "2014-07-20"
---
Rclone Download v0.98
Rclone Download v1.03
=====================
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.98-windows-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.03-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.03-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.98-osx-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.03-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.03-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.98-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v0.98-linux-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.03-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.03-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.03-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.98-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v0.98-freebsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.03-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.03-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.03-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.98-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v0.98-netbsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.03-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.03-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.03-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.98-openbsd-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.03-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.03-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-plan9-386.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.03-plan9-386.zip)
Older downloads can be found [here](http://downloads.rclone.org/)

docs/content/drive.md

@@ -4,10 +4,12 @@ description: "Rclone docs for Google drive"
date: "2014-04-26"
---
<i class="fa fa-google"></i> Google Drive
-----------------------------------------
Paths are specified as `drive:path`
Drive paths may be as deep as required, eg
`drive:directory/subdirectory`.
Drive paths may be as deep as required, eg `drive:directory/subdirectory`.
The initial setup for drive involves getting a token from Google drive
which you need to do in your browser. `rclone config` walks you

docs/content/dropbox.md Normal file

@@ -0,0 +1,80 @@
---
title: "Dropbox"
description: "Rclone docs for Dropbox"
date: "2014-07-17"
---
<i class="fa fa-dropbox"></i> Dropbox
---------------------------------
Paths are specified as `remote:path`
Dropbox paths may be as deep as required, eg
`remote:directory/subdirectory`.
The initial setup for dropbox involves getting a token from Dropbox
which you need to do in your browser. `rclone config` walks you
through it.
Here is an example of how to make a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) google cloud storage
5) dropbox
6) drive
type> 5
Dropbox App Key - leave blank to use rclone's.
app_key>
Dropbox App Secret - leave blank to use rclone's.
app_secret>
Remote config
Please visit:
https://www.dropbox.com/1/oauth2/authorize?client_id=XXXXXXXXXXXXXXX&response_type=code
Enter the code: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXXXXXXXX
--------------------
[remote]
app_key =
app_secret =
token = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
You can then use it like this,
List directories in top level of your dropbox
rclone lsd remote:
List all the files in your dropbox
rclone ls remote:
To copy a local directory to a dropbox directory called backup
rclone copy /home/source remote:backup
Modified time
-------------
Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone". Dropbox datastores are limited
to 100,000 rows so this is the maximum number of files rclone can
manage on Dropbox.
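For reference, the datastore records are keyed as implemented in dropbox/dropbox.go later in this diff: the md5 of the lower-cased remote path, since the Dropbox file system is case insensitive.

```
package main

import (
	"crypto/md5"
	"fmt"
	"strings"
)

// metadataKey as in dropbox/dropbox.go: md5 of the lower-cased path.
func metadataKey(path string) string {
	path = strings.ToLower(path)
	hash := md5.New()
	hash.Write([]byte(path))
	return fmt.Sprintf("%x", hash.Sum(nil))
}

func main() {
	// Paths differing only in case map to the same record.
	fmt.Println(metadataKey("/Backup/File.txt"))
	fmt.Println(metadataKey("/backup/file.txt"))
}
```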

docs/content/googlecloudstorage.md

@@ -0,0 +1,117 @@
---
title: "Google Cloud Storage"
description: "Rclone docs for Google Cloud Storage"
date: "2014-07-17"
---
<i class="fa fa-google"></i> Google Cloud Storage
-------------------------------------------------
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
command.) You may put subdirectories in too, eg `remote:bucket/path/to/dir`.
The initial setup for google cloud storage involves getting a token from Google Cloud Storage
which you need to do in your browser. `rclone config` walks you
through it.
Here is an example of how to make a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) google cloud storage
5) dropbox
6) drive
type> 4
Google Application Client Id - leave blank to use rclone's.
client_id>
Google Application Client Secret - leave blank to use rclone's.
client_secret>
Project number optional - needed only for list/create/delete buckets - see your developer console.
project_number> 12345678
Access Control List for new objects.
Choose a number from below, or type in your own value
* Object owner gets OWNER access, and all Authenticated Users get READER access.
1) authenticatedRead
* Object owner gets OWNER access, and project team owners get OWNER access.
2) bucketOwnerFullControl
* Object owner gets OWNER access, and project team owners get READER access.
3) bucketOwnerRead
* Object owner gets OWNER access [default if left blank].
4) private
* Object owner gets OWNER access, and project team members get access according to their roles.
5) projectPrivate
* Object owner gets OWNER access, and all Users get READER access.
6) publicRead
object_acl> 4
Access Control List for new buckets.
Choose a number from below, or type in your own value
* Project team owners get OWNER access, and all Authenticated Users get READER access.
1) authenticatedRead
* Project team owners get OWNER access [default if left blank].
2) private
* Project team members get access according to their roles.
3) projectPrivate
* Project team owners get OWNER access, and all Users get READER access.
4) publicRead
* Project team owners get OWNER access, and all Users get WRITER access.
5) publicReadWrite
bucket_acl> 2
Remote config
Go to the following link in your browser
https://accounts.google.com/o/oauth2/auth?access_type=&approval_prompt=&client_id=XXXXXXXXXXXX.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&response_type=code&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdevstorage.full_control&state=state
Log in, then type paste the token that is returned in the browser here
Enter verification code> x/xxxxxxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxxxxxxxxxx_xxxxxxxx
--------------------
[remote]
type = google cloud storage
client_id =
client_secret =
token = {"AccessToken":"xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx","Expiry":"2014-07-17T20:49:14.929208288+01:00","Extra":null}
project_number = 12345678
object_acl = private
bucket_acl = private
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
This remote is called `remote` and can now be used like this
See all the buckets in your project
rclone lsd remote:
Make a new bucket
rclone mkdir remote:bucket
List the contents of a bucket
rclone ls remote:bucket
Sync `/home/local/directory` to the remote bucket, deleting any excess
files in the bucket.
rclone sync /home/local/directory remote:bucket
Modified time
-------------
Google cloud storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in
RFC3339 format accurate to 1ns.

docs/content/local.md

@@ -4,8 +4,8 @@ description: "Rclone docs for the local filesystem"
date: "2014-04-26"
---
Local Filesystem
----------------
<i class="fa fa-file"></i> Local Filesystem
-------------------------------------------
Local paths are specified as normal filesystem paths, eg `/path/to/wherever`, so

docs/content/s3.md

@@ -4,13 +4,11 @@ description: "Rclone docs for Amazon S3"
date: "2014-04-26"
---
Paths are specified as `remote:bucket` or `remote:`
<i class="fa fa-archive"></i> Amazon S3
---------------------------------------
S3 paths can't refer to subdirectories within a bucket (yet).
So to copy a local directory to a s3 container called backup
rclone sync /home/source s3:backup
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
command.) You may put subdirectories in too, eg `remote:bucket/path/to/dir`.
Here is an example of making an s3 configuration. First run

docs/content/swift.md

@@ -4,13 +4,17 @@ description: "Swift"
date: "2014-04-26"
---
<i class="fa fa-space-shuttle"></i>Swift
----------------------------------------
Swift refers to [Openstack Object Storage](http://www.openstack.org/software/openstack-storage/).
Commercial implementations of that being:
* [Rackspace Cloud Files](http://www.rackspace.com/cloud/files/)
* [Memset Memstore](http://www.memset.com/cloud/storage/)
Paths are specified as `remote:container` or `remote:`
Paths are specified as `remote:container` (or `remote:` for the `lsd`
command.) You may put subdirectories in too, eg `remote:container/path/to/dir`.
Here is an example of making a swift configuration. First run


@@ -20,6 +20,8 @@
<li><a href="/drive/"><i class="fa fa-google"></i> Drive</a></li>
<li><a href="/s3/"><i class="fa fa-archive"></i> S3</a></li>
<li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Swift</a></li>
<li><a href="/dropbox/"><i class="fa fa-dropbox"></i> Dropbox</a></li>
<li><a href="/googlecloudstorage/"><i class="fa fa-google"></i> Google Cloud Storage</a></li>
<li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
</ul>
</li>

drive/drive.go

@@ -16,20 +16,20 @@ package drive
// * files with / in name
import (
"encoding/json"
"fmt"
"io"
"log"
"mime"
"net/http"
"os"
"path"
"strings"
"sync"
"time"
"code.google.com/p/goauth2/oauth"
"code.google.com/p/google-api-go-client/drive/v2"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/googleauth"
"github.com/ogier/pflag"
)
@@ -38,20 +38,30 @@ const (
rcloneClientId = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "X4Z3ca8xfWDb1Voo-F9a7ZxJ"
driveFolderType = "application/vnd.google-apps.folder"
RFC3339In = time.RFC3339
RFC3339Out = "2006-01-02T15:04:05.000000000Z07:00"
)
// Globals
var (
// Flags
driveFullList = pflag.BoolP("drive-full-list", "", true, "Use a full listing for directory list. More data but usually quicker.")
// Description of how to auth for this app
driveAuth = &googleauth.Auth{
Scope: "https://www.googleapis.com/auth/drive",
DefaultClientId: rcloneClientId,
DefaultClientSecret: rcloneClientSecret,
}
)
// Register with Fs
func init() {
fs.Register(&fs.FsInfo{
Name: "drive",
NewFs: NewFs,
Config: Config,
Name: "drive",
NewFs: NewFs,
Config: func(name string) {
driveAuth.Config(name)
},
Options: []fs.Option{{
Name: "client_id",
Help: "Google Application Client Id - leave blank to use rclone's.",
@@ -62,77 +72,6 @@ func init() {
})
}
// Configuration helper - called after the user has put in the defaults
func Config(name string) {
// See if already have a token
tokenString := fs.ConfigFile.MustValue(name, "token")
if tokenString != "" {
fmt.Printf("Already have a drive token - refresh?\n")
if !fs.Confirm() {
return
}
}
// Get a drive transport
t, err := newDriveTransport(name)
if err != nil {
log.Fatalf("Couldn't make drive transport: %v", err)
}
// Generate a URL for the user to visit for authorization.
authUrl := t.Config.AuthCodeURL("state")
fmt.Printf("Go to the following link in your browser\n")
fmt.Printf("%s\n", authUrl)
fmt.Printf("Log in, then type paste the token that is returned in the browser here\n")
// Read the code, and exchange it for a token.
fmt.Printf("Enter verification code> ")
authCode := fs.ReadLine()
_, err = t.Exchange(authCode)
if err != nil {
log.Fatalf("Failed to get token: %v", err)
}
}
// A token cache to save the token in the config file section named
type tokenCache string
// Get the token from the config file - returns an error if it isn't present
func (name tokenCache) Token() (*oauth.Token, error) {
tokenString, err := fs.ConfigFile.GetValue(string(name), "token")
if err != nil {
return nil, err
}
if tokenString == "" {
return nil, fmt.Errorf("Empty token found - please reconfigure")
}
token := new(oauth.Token)
err = json.Unmarshal([]byte(tokenString), token)
if err != nil {
return nil, err
}
return token, nil
}
// Save the token to the config file
//
// This saves the config file if it changes
func (name tokenCache) PutToken(token *oauth.Token) error {
tokenBytes, err := json.Marshal(token)
if err != nil {
return err
}
tokenString := string(tokenBytes)
old := fs.ConfigFile.MustValue(string(name), "token")
if tokenString != old {
fs.ConfigFile.SetValue(string(name), "token", tokenString)
fs.SaveConfig()
}
return nil
}
// FsDrive represents a remote drive server
type FsDrive struct {
svc *drive.Service // the connection to the drive server
@@ -265,39 +204,9 @@ OUTER:
return
}
// Makes a new drive transport from the config
func newDriveTransport(name string) (*oauth.Transport, error) {
clientId := fs.ConfigFile.MustValue(name, "client_id")
if clientId == "" {
clientId = rcloneClientId
}
clientSecret := fs.ConfigFile.MustValue(name, "client_secret")
if clientSecret == "" {
clientSecret = rcloneClientSecret
}
// Settings for authorization.
var driveConfig = &oauth.Config{
ClientId: clientId,
ClientSecret: clientSecret,
Scope: "https://www.googleapis.com/auth/drive",
RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
AuthURL: "https://accounts.google.com/o/oauth2/auth",
TokenURL: "https://accounts.google.com/o/oauth2/token",
TokenCache: tokenCache(name),
}
t := &oauth.Transport{
Config: driveConfig,
Transport: http.DefaultTransport,
}
return t, nil
}
// NewFs constructs an FsDrive from the path, container:path
func NewFs(name, path string) (fs.Fs, error) {
t, err := newDriveTransport(name)
t, err := driveAuth.NewTransport(name)
if err != nil {
return nil, err
}
@@ -306,18 +215,12 @@ func NewFs(name, path string) (fs.Fs, error) {
if err != nil {
return nil, err
}
f := &FsDrive{
root: root,
dirCache: newDirCache(),
}
// Try to pull the token from the cache; if this fails, we need to get one.
token, err := t.Config.TokenCache.Token()
if err != nil {
return nil, fmt.Errorf("Failed to get token: %s", err)
}
t.Token = token
// Create a new authorized Drive client.
f.client = t.Client()
f.svc, err = drive.New(f.client)
@@ -674,7 +577,7 @@ func (f *FsDrive) ListDir() fs.DirChan {
Bytes: -1,
Count: -1,
}
dir.When, _ = time.Parse(time.RFC3339, item.ModifiedDate)
dir.When, _ = time.Parse(RFC3339In, item.ModifiedDate)
out <- dir
return false
})
@@ -687,6 +590,30 @@ func (f *FsDrive) ListDir() fs.DirChan {
return out
}
// seekWrapper wraps an io.Reader with a basic Seek for
// code.google.com/p/google-api-go-client/googleapi
// to detect the length (see getReaderSize function)
type seekWrapper struct {
in io.Reader
size int64
}
// Read bytes from the object - see io.Reader
func (file *seekWrapper) Read(p []byte) (n int, err error) {
return file.in.Read(p)
}
// Seek - minimal implementation for Google Drive's length detection
func (file *seekWrapper) Seek(offset int64, whence int) (int64, error) {
switch whence {
case os.SEEK_CUR:
return 0, nil
case os.SEEK_END:
return file.size, nil
}
return 0, nil
}
// Put the object
//
// This assumes that the object doesn't already exist - if you
@@ -711,7 +638,7 @@ func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64
if mimeType == "" {
mimeType = "application/octet-stream"
}
modifiedDate := modTime.Format(time.RFC3339Nano)
modifiedDate := modTime.Format(RFC3339Out)
// Define the metadata for the file we are going to create.
info := &drive.File{
@@ -723,6 +650,7 @@ func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64
}
// Make the API request to upload metadata and file data.
in = &seekWrapper{in: in, size: size}
info, err = f.svc.Files.Insert(info).Media(in).Do()
if err != nil {
return o, fmt.Errorf("Upload failed: %s", err)
@@ -768,7 +696,9 @@ func (fs *FsDrive) Precision() time.Duration {
// Purge deletes all the files and the container
//
// Returns an error if it isn't empty
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsDrive) Purge() error {
if f.root == "" {
return fmt.Errorf("Can't purge root directory")
@@ -778,6 +708,7 @@ func (f *FsDrive) Purge() error {
return err
}
err = f.svc.Files.Delete(f.rootId).Do()
f.dirCache.Flush()
if err != nil {
return err
}
@@ -864,7 +795,7 @@ func (o *FsObjectDrive) ModTime() time.Time {
fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
modTime, err := time.Parse(time.RFC3339, o.modifiedDate)
modTime, err := time.Parse(RFC3339In, o.modifiedDate)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %s", err)
return time.Now()
@@ -882,7 +813,7 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) {
}
// New metadata
info := &drive.File{
ModifiedDate: modTime.Format(time.RFC3339Nano),
ModifiedDate: modTime.Format(RFC3339Out),
}
// Set modified date
_, err = o.drive.svc.Files.Update(o.id, info).SetModifiedDate(true).Do()
@@ -899,8 +830,11 @@ func (o *FsObjectDrive) Storable() bool {
// Open an object for read
func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
req, _ := http.NewRequest("GET", o.url, nil)
req.Header.Set("User-Agent", "rclone/1.0")
req, err := http.NewRequest("GET", o.url, nil)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", fs.UserAgent)
res, err := o.drive.client.Do(req)
if err != nil {
return nil, err
@@ -920,10 +854,11 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) error {
info := &drive.File{
Id: o.id,
ModifiedDate: modTime.Format(time.RFC3339Nano),
ModifiedDate: modTime.Format(RFC3339Out),
}
// Make the API request to upload metadata and file data.
in = &seekWrapper{in: in, size: size}
info, err := o.drive.svc.Files.Update(info.Id, info).SetModifiedDate(true).Media(in).Do()
if err != nil {
return fmt.Errorf("Update failed: %s", err)

dropbox/dropbox.go Normal file

@@ -0,0 +1,750 @@
// Dropbox interface
package dropbox
/*
Limitations of dropbox
File system is case insensitive
The datastore is limited to 100,000 records which therefore is the
limit of the number of files that rclone can use on dropbox.
FIXME only open datastore if we need it?
FIXME Getting this sometimes
Failed to copy: Upload failed: invalid character '<' looking for beginning of value
This is a JSON decode error - from Update / UploadByChunk
- Caused by 500 error from dropbox
- See https://github.com/stacktic/dropbox/issues/1
- Possibly confusing dropbox with excess concurrency?
*/
import (
"crypto/md5"
"errors"
"fmt"
"io"
"log"
"path"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/stacktic/dropbox"
)
// Constants
const (
rcloneAppKey = "5jcck7diasz0rqy"
rcloneAppSecret = "1n9m04y2zx7bf26"
uploadChunkSize = 64 * 1024 // chunk size for upload
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
datastoreName = "rclone"
tableName = "metadata"
md5sumField = "md5sum"
mtimeField = "mtime"
maxCommitRetries = 5
RFC3339In = time.RFC3339
RFC3339Out = "2006-01-02T15:04:05.000000000Z07:00"
)
// Register with Fs
func init() {
fs.Register(&fs.FsInfo{
Name: "dropbox",
NewFs: NewFs,
Config: Config,
Options: []fs.Option{{
Name: "app_key",
Help: "Dropbox App Key - leave blank to use rclone's.",
}, {
Name: "app_secret",
Help: "Dropbox App Secret - leave blank to use rclone's.",
}},
})
}
// Configuration helper - called after the user has put in the defaults
func Config(name string) {
// See if already have a token
token := fs.ConfigFile.MustValue(name, "token")
if token != "" {
fmt.Printf("Already have a dropbox token - refresh?\n")
if !fs.Confirm() {
return
}
}
// Get a dropbox
db := newDropbox(name)
// This method will ask the user to visit an URL and paste the generated code.
if err := db.Auth(); err != nil {
log.Fatalf("Failed to authorize: %v", err)
}
// Get the token
token = db.AccessToken()
// Stuff it in the config file if it has changed
old := fs.ConfigFile.MustValue(name, "token")
if token != old {
fs.ConfigFile.SetValue(name, "token", token)
fs.SaveConfig()
}
}
// FsDropbox represents a remote dropbox server
type FsDropbox struct {
db *dropbox.Dropbox // the connection to the dropbox server
root string // the path we are working on
slashRoot string // root with "/" prefix
slashRootSlash string // root with "/" prefix and postfix
datastoreManager *dropbox.DatastoreManager
datastore *dropbox.Datastore
table *dropbox.Table
datastoreMutex sync.Mutex // lock this when using the datastore
datastoreErr error // pending errors on the datastore
}
// FsObjectDropbox describes a dropbox object
type FsObjectDropbox struct {
dropbox *FsDropbox // what this object is part of
remote string // The remote path
md5sum string // md5sum of the object
bytes int64 // size of the object
modTime time.Time // time it was last modified
}
// ------------------------------------------------------------
// String converts this FsDropbox to a string
func (f *FsDropbox) String() string {
return fmt.Sprintf("Dropbox root '%s'", f.root)
}
// Makes a new dropbox from the config
func newDropbox(name string) *dropbox.Dropbox {
db := dropbox.NewDropbox()
appKey := fs.ConfigFile.MustValue(name, "app_key")
if appKey == "" {
appKey = rcloneAppKey
}
appSecret := fs.ConfigFile.MustValue(name, "app_secret")
if appSecret == "" {
appSecret = rcloneAppSecret
}
db.SetAppInfo(appKey, appSecret)
return db
}
// NewFs constructs an FsDropbox from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
db := newDropbox(name)
f := &FsDropbox{
db: db,
}
f.setRoot(root)
// Read the token from the config file
token := fs.ConfigFile.MustValue(name, "token")
// Authorize the client
db.SetAccessToken(token)
// Make a db to store rclone metadata in
f.datastoreManager = db.NewDatastoreManager()
// Open the datastore in the background
go f.openDataStore()
// See if the root is actually an object
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
if err == nil && !entry.IsDir {
remote := path.Base(f.root)
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
}
return f, nil
}
// Sets root in f
func (f *FsDropbox) setRoot(root string) {
f.root = strings.Trim(root, "/")
f.slashRoot = "/" + f.root
f.slashRootSlash = f.slashRoot
if f.root != "" {
f.slashRootSlash += "/"
}
}
// Opens the datastore in f
func (f *FsDropbox) openDataStore() {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
fs.Debug(f, "Open rclone datastore")
// Open the rclone datastore
var err error
f.datastore, err = f.datastoreManager.OpenDatastore(datastoreName)
if err != nil {
fs.Log(f, "Failed to open datastore: %v", err)
f.datastoreErr = err
return
}
// Get the table we are using
f.table, err = f.datastore.GetTable(tableName)
if err != nil {
fs.Log(f, "Failed to open datastore table: %v", err)
f.datastoreErr = err
return
}
fs.Debug(f, "Open rclone datastore finished")
}
// Return an FsObject from a path
func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) (fs.Object, error) {
o := &FsObjectDropbox{
dropbox: f,
remote: remote,
}
if info != nil {
o.setMetadataFromEntry(info)
} else {
err := o.readEntryAndSetMetadata()
if err != nil {
// logged already fs.Debug("Failed to read info: %s", err)
return nil, err
}
}
return o, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) NewFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
fs, _ := f.newFsObjectWithInfo(remote, info)
// Errors have already been logged
return fs
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) NewFsObject(remote string) fs.Object {
return f.NewFsObjectWithInfo(remote, nil)
}
// Strips the root off entry and returns it
func (f *FsDropbox) stripRoot(entry *dropbox.Entry) string {
path := entry.Path
if strings.HasPrefix(path, f.slashRootSlash) {
path = path[len(f.slashRootSlash):]
}
return path
}
// Walk the root returning a channel of FsObjects
func (f *FsDropbox) list(out fs.ObjectsChan) {
cursor := ""
for {
deltaPage, err := f.db.Delta(cursor, f.slashRoot)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list: %s", err)
break
} else {
if deltaPage.Reset && cursor != "" {
fs.Log(f, "Unexpected reset during listing - try again")
fs.Stats.Error()
break
}
fs.Debug(f, "%d delta entries received", len(deltaPage.Entries))
for i := range deltaPage.Entries {
deltaEntry := &deltaPage.Entries[i]
entry := deltaEntry.Entry
if entry == nil {
// This notifies of a deleted object
fs.Debug(f, "Deleting metadata for %q", deltaEntry.Path)
key := metadataKey(deltaEntry.Path) // Path is lowercased
err := f.deleteMetadata(key)
if err != nil {
fs.Debug(f, "Failed to delete metadata for %q", deltaEntry.Path)
// Don't accumulate Error here
}
} else {
if entry.IsDir {
// ignore directories
} else {
path := f.stripRoot(entry)
out <- f.NewFsObjectWithInfo(path, entry)
}
}
}
if !deltaPage.HasMore {
break
}
cursor = deltaPage.Cursor
}
}
}
// Walk the path returning a channel of FsObjects
func (f *FsDropbox) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
defer close(out)
f.list(out)
}()
return out
}
// Walk the path returning a channel of FsObjects
func (f *FsDropbox) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
entry, err := f.db.Metadata(f.root, true, false, "", "", metadataLimit)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list directories in root: %s", err)
} else {
for i := range entry.Contents {
entry := &entry.Contents[i]
if entry.IsDir {
out <- &fs.Dir{
Name: f.stripRoot(entry),
When: time.Time(entry.ClientMtime),
Bytes: int64(entry.Bytes),
Count: -1,
}
}
}
}
}()
return out
}
// A read closer which doesn't close the input
type readCloser struct {
in io.Reader
}
// Read bytes from the object - see io.Reader
func (rc *readCloser) Read(p []byte) (n int, err error) {
return rc.in.Read(p)
}
// Dummy close function
func (rc *readCloser) Close() error {
return nil
}
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsDropbox) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
o := &FsObjectDropbox{dropbox: f, remote: remote}
return o, o.Update(in, modTime, size)
}
// Mkdir creates the container if it doesn't exist
func (f *FsDropbox) Mkdir() error {
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
if err == nil {
if entry.IsDir {
return nil
}
return fmt.Errorf("%q already exists as file", f.root)
}
_, err = f.db.CreateFolder(f.slashRoot)
return err
}
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *FsDropbox) Rmdir() error {
entry, err := f.db.Metadata(f.slashRoot, true, false, "", "", 16)
if err != nil {
return err
}
if len(entry.Contents) != 0 {
return errors.New("Directory not empty")
}
return f.Purge()
}
// Return the precision
func (fs *FsDropbox) Precision() time.Duration {
return time.Nanosecond
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsDropbox) Purge() error {
// Delete metadata first
var wg sync.WaitGroup
to_be_deleted := f.List()
wg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
go func() {
defer wg.Done()
for dst := range to_be_deleted {
o := dst.(*FsObjectDropbox)
o.deleteMetadata()
}
}()
}
wg.Wait()
// Let dropbox delete the filesystem tree
_, err := f.db.Delete(f.slashRoot)
return err
}
// Tries the transaction in fn then calls commit, repeating until retry limit
//
// Holds datastore mutex while in progress
func (f *FsDropbox) transaction(fn func() error) error {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
if f.datastoreErr != nil {
return f.datastoreErr
}
var err error
for i := 1; i <= maxCommitRetries; i++ {
err = fn()
if err != nil {
return err
}
err = f.datastore.Commit()
if err == nil {
break
}
fs.Debug(f, "Retrying transaction %d/%d", i, maxCommitRetries)
}
if err != nil {
return fmt.Errorf("Failed to commit metadata changes: %s", err)
}
return nil
}
// Deletes the metadata associated with this key
func (f *FsDropbox) deleteMetadata(key string) error {
return f.transaction(func() error {
record, err := f.table.Get(key)
if err != nil {
return fmt.Errorf("Couldn't get record: %s", err)
}
if record == nil {
return nil
}
record.DeleteRecord()
return nil
})
}
// Reads the record attached to key
//
// Holds datastore mutex while in progress
func (f *FsDropbox) readRecord(key string) (*dropbox.Record, error) {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
if f.datastoreErr != nil {
return nil, f.datastoreErr
}
return f.table.Get(key)
}
// ------------------------------------------------------------
// Return the parent Fs
func (o *FsObjectDropbox) Fs() fs.Fs {
return o.dropbox
}
// Return a string version
func (o *FsObjectDropbox) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Return the remote path
func (o *FsObjectDropbox) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
//
// FIXME has to download the file!
func (o *FsObjectDropbox) Md5sum() (string, error) {
if o.md5sum != "" {
return o.md5sum, nil
}
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return "", fmt.Errorf("Failed to read metadata: %s", err)
}
// For pre-existing files which have no md5sum can read it and set it?
// in, err := o.Open()
// if err != nil {
// return "", err
// }
// defer in.Close()
// hash := md5.New()
// _, err = io.Copy(hash, in)
// if err != nil {
// return "", err
// }
// o.md5sum = fmt.Sprintf("%x", hash.Sum(nil))
return o.md5sum, nil
}
// Size returns the size of an object in bytes
func (o *FsObjectDropbox) Size() int64 {
return o.bytes
}
// setMetadataFromEntry sets the fs data from a dropbox.Entry
//
// This isn't a complete set of metadata and has an inaccurate date
func (o *FsObjectDropbox) setMetadataFromEntry(info *dropbox.Entry) {
o.bytes = int64(info.Bytes)
o.modTime = time.Time(info.ClientMtime)
}
// Reads the entry from dropbox
func (o *FsObjectDropbox) readEntry() (*dropbox.Entry, error) {
entry, err := o.dropbox.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit)
if err != nil {
fs.Debug(o, "Error reading file: %s", err)
return nil, fmt.Errorf("Error reading file: %s", err)
}
return entry, nil
}
// Read entry if not set and set metadata from it
func (o *FsObjectDropbox) readEntryAndSetMetadata() error {
// Last resort set time from client
if !o.modTime.IsZero() {
return nil
}
entry, err := o.readEntry()
if err != nil {
return err
}
o.setMetadataFromEntry(entry)
return nil
}
// Returns the remote path for the object
func (o *FsObjectDropbox) remotePath() string {
return o.dropbox.slashRootSlash + o.remote
}
// Returns the key for the metadata database for a given path
func metadataKey(path string) string {
// NB File system is case insensitive
path = strings.ToLower(path)
hash := md5.New()
hash.Write([]byte(path))
return fmt.Sprintf("%x", hash.Sum(nil))
}
// Returns the key for the metadata database
func (o *FsObjectDropbox) metadataKey() string {
return metadataKey(o.remotePath())
}
// readMetaData gets the info if it hasn't already been fetched
func (o *FsObjectDropbox) readMetaData() (err error) {
if o.md5sum != "" {
return nil
}
// fs.Debug(o, "Reading metadata from datastore")
record, err := o.dropbox.readRecord(o.metadataKey())
if err != nil {
fs.Debug(o, "Couldn't read metadata: %s", err)
record = nil
}
if record != nil {
// Read md5sum
md5sumInterface, ok, err := record.Get(md5sumField)
if err != nil {
return err
}
if !ok {
fs.Debug(o, "Couldn't find md5sum in record")
} else {
md5sum, ok := md5sumInterface.(string)
if !ok {
fs.Debug(o, "md5sum not a string")
} else {
o.md5sum = md5sum
}
}
// read mtime
mtimeInterface, ok, err := record.Get(mtimeField)
if err != nil {
return err
}
if !ok {
fs.Debug(o, "Couldn't find mtime in record")
} else {
mtime, ok := mtimeInterface.(string)
if !ok {
fs.Debug(o, "mtime not a string")
} else {
modTime, err := time.Parse(RFC3339In, mtime)
if err != nil {
return err
}
o.modTime = modTime
}
}
}
// Last resort
o.readEntryAndSetMetadata()
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectDropbox) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
return o.modTime
}
// Sets the modification time of the local fs object into the record
// FIXME if we don't set md5sum what will that do?
func (o *FsObjectDropbox) setModTimeAndMd5sum(modTime time.Time, md5sum string) error {
key := o.metadataKey()
// fs.Debug(o, "Writing metadata to datastore")
return o.dropbox.transaction(func() error {
record, err := o.dropbox.table.GetOrInsert(key)
if err != nil {
return fmt.Errorf("Couldn't read record: %s", err)
}
if md5sum != "" {
err = record.Set(md5sumField, md5sum)
if err != nil {
return fmt.Errorf("Couldn't set md5sum record: %s", err)
}
o.md5sum = md5sum
}
if !modTime.IsZero() {
mtime := modTime.Format(RFC3339Out)
err := record.Set(mtimeField, mtime)
if err != nil {
return fmt.Errorf("Couldn't set mtime record: %s", err)
}
o.modTime = modTime
}
return nil
})
}
// Deletes the metadata associated with this file
//
// It logs any errors
func (o *FsObjectDropbox) deleteMetadata() {
fs.Debug(o, "Deleting metadata from datastore")
err := o.dropbox.deleteMetadata(o.metadataKey())
if err != nil {
fs.Log(o, "Error deleting metadata: %v", err)
fs.Stats.Error()
}
}
// Sets the modification time of the local fs object
//
// Commits the datastore
func (o *FsObjectDropbox) SetModTime(modTime time.Time) {
err := o.setModTimeAndMd5sum(modTime, "")
if err != nil {
fs.Stats.Error()
fs.Log(o, err.Error())
}
}
// Is this object storable
func (o *FsObjectDropbox) Storable() bool {
return true
}
// Open an object for read
func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
in, _, err = o.dropbox.db.Download(o.remotePath(), "", 0)
return
}
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) error {
// Calculate md5sum as we upload it
hash := md5.New()
rc := &readCloser{in: io.TeeReader(in, hash)}
entry, err := o.dropbox.db.UploadByChunk(rc, uploadChunkSize, o.remotePath(), true, "")
if err != nil {
return fmt.Errorf("Upload failed: %s", err)
}
o.setMetadataFromEntry(entry)
md5sum := fmt.Sprintf("%x", hash.Sum(nil))
return o.setModTimeAndMd5sum(modTime, md5sum)
}
// Remove an object
func (o *FsObjectDropbox) Remove() error {
o.deleteMetadata()
_, err := o.dropbox.db.Delete(o.remotePath())
return err
}
// Check the interfaces are satisfied
var _ fs.Fs = &FsDropbox{}
var _ fs.Purger = &FsDropbox{}
var _ fs.Object = &FsObjectDropbox{}
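One detail worth pulling out of Update() above: the md5sum is computed while the data streams through io.TeeReader, so no second pass over the file is needed. A standalone sketch:

```
package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := strings.NewReader("hello world")
	hash := md5.New()
	tee := io.TeeReader(src, hash)

	// Stand-in for UploadByChunk consuming the reader.
	n, err := io.Copy(ioutil.Discard, tee)
	if err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %d bytes, md5 %x\n", n, hash.Sum(nil))
}
```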

fs/config.go

@@ -70,10 +70,10 @@ func LoadConfig() {
// FIXME read these from the config file too
Config.Verbose = *verbose
Config.Quiet = *quiet
Config.Quiet = *dryRun
Config.ModifyWindow = *modifyWindow
Config.Checkers = *checkers
Config.Transfers = *transfers
Config.DryRun = *dryRun
ConfigPath = *configFile
@@ -147,7 +147,7 @@ func Command(commands []string) byte {
if len(result) != 1 {
continue
}
i := strings.IndexByte(optString, result[0])
i := strings.Index(optString, string(result[0]))
if i >= 0 {
return result[0]
}
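The strings.Index change above is one of the go 1.1 compatibility fixes (strings.IndexByte only arrived in Go 1.2); the two calls are equivalent here:

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	optString := "endq"
	var b byte = 'd'
	// Equivalent to strings.IndexByte(optString, b), which needs Go 1.2+.
	fmt.Println(strings.Index(optString, string(b))) // 2
}
```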

fs/fs.go

@@ -10,6 +10,12 @@ import (
"time"
)
// Constants
const (
// User agent for Fs which can set it
UserAgent = "rclone/" + Version
)
// Globals
var (
// Filesystem registry
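This replaces the hard-coded "rclone/1.0" string in drive/drive.go above; any Fs that sets a user agent now derives it from the version. A minimal sketch of the usage:

```
package main

import (
	"fmt"
	"net/http"
)

const Version = "v1.03"
const UserAgent = "rclone/" + Version // as in fs/fs.go

func main() {
	req, err := http.NewRequest("GET", "http://example.com", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("User-Agent", UserAgent)
	fmt.Println(req.Header.Get("User-Agent")) // rclone/v1.03
}
```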

fs/operations.go

@@ -97,6 +97,18 @@ func Equal(src, dst Object) bool {
return true
}
// Used to remove a failed copy
func removeFailedCopy(dst Object) {
if dst != nil {
Debug(dst, "Removing failed copy")
removeErr := dst.Remove()
if removeErr != nil {
Stats.Error()
Log(dst, "Failed to remove failed copy: %s", removeErr)
}
}
}
// Copy src object to dst or f if nil
//
// If dst is nil then the object must not exist already. If you do
@@ -126,16 +138,38 @@ func Copy(f Fs, dst, src Object) {
if err != nil {
Stats.Error()
Log(src, "Failed to copy: %s", err)
if dst != nil {
Debug(dst, "Removing failed copy")
removeErr := dst.Remove()
if removeErr != nil {
Stats.Error()
Log(dst, "Failed to remove failed copy: %s", removeErr)
}
}
removeFailedCopy(dst)
return
}
// Verify sizes are the same after transfer
if src.Size() != dst.Size() {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
Log(dst, "%s", err)
removeFailedCopy(dst)
return
}
// Verify md5sums are the same after transfer - ignoring blank md5sums
srcMd5sum, md5sumErr := src.Md5sum()
if md5sumErr != nil {
Stats.Error()
Log(src, "Failed to read md5sum: %s", md5sumErr)
} else if srcMd5sum != "" {
dstMd5sum, md5sumErr := dst.Md5sum()
if md5sumErr != nil {
Stats.Error()
Log(dst, "Failed to read md5sum: %s", md5sumErr)
} else if dstMd5sum != "" && srcMd5sum != dstMd5sum {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
Log(dst, "%s", err)
removeFailedCopy(dst)
return
}
}
Debug(src, actionTaken)
}
@@ -159,7 +193,7 @@ func checkOne(pair ObjectPair, out ObjectPairChan) {
out <- pair
}
// Read FsObjects~s on in send to out if they need uploading
// Read Objects~s on in send to out if they need uploading
//
// FIXME potentially doing lots of MD5SUMS at once
func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
@@ -172,13 +206,17 @@ func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
}
}
// Read FsObjects on in and copy them
// Read Objects on in and copy them
func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
defer wg.Done()
for pair := range in {
src := pair.src
Stats.Transferring(src)
Copy(fdst, pair.dst, src)
if Config.DryRun {
Debug(src, "Not copying as --dry-run")
} else {
Copy(fdst, pair.dst, src)
}
Stats.DoneTransferring(src)
}
}
@@ -372,10 +410,10 @@ func Check(fdst, fsrc Fs) error {
return nil
}
// List the Fs to stdout
// List the Fs to the supplied function
//
// Lists in parallel which may get them out of order
func List(f Fs) error {
func ListFn(f Fs, fn func(Object)) error {
in := f.List()
var wg sync.WaitGroup
wg.Add(Config.Checkers)
@@ -383,10 +421,7 @@ func List(f Fs) error {
go func() {
defer wg.Done()
for o := range in {
Stats.Checking(o)
modTime := o.ModTime()
Stats.DoneChecking(o)
fmt.Printf("%9d %19s %s\n", o.Size(), modTime.Format("2006-01-02 15:04:05.00000000"), o.Remote())
fn(o)
}
}()
}
@@ -394,6 +429,49 @@ func List(f Fs) error {
return nil
}
// List the Fs to stdout
//
// Shows size and path
//
// Lists in parallel which may get them out of order
func List(f Fs) error {
return ListFn(f, func(o Object) {
fmt.Printf("%9d %s\n", o.Size(), o.Remote())
})
}
// List the Fs to stdout
//
// Shows size, mod time and path
//
// Lists in parallel which may get them out of order
func ListLong(f Fs) error {
return ListFn(f, func(o Object) {
Stats.Checking(o)
modTime := o.ModTime()
Stats.DoneChecking(o)
fmt.Printf("%9d %19s %s\n", o.Size(), modTime.Format("2006-01-02 15:04:05.00000000"), o.Remote())
})
}
// List the Fs to stdout
//
// Produces the same output as the md5sum command
//
// Lists in parallel which may get them out of order
func Md5sum(f Fs) error {
return ListFn(f, func(o Object) {
Stats.Checking(o)
md5sum, err := o.Md5sum()
Stats.DoneChecking(o)
if err != nil {
Debug(o, "Failed to read MD5: %v", err)
md5sum = "UNKNOWN"
}
fmt.Printf("%32s %s\n", md5sum, o.Remote())
})
}
// List the directories/buckets/containers in the Fs to stdout
func ListDir(f Fs) error {
for dir := range f.ListDir() {
@@ -431,10 +509,14 @@ func Rmdir(f Fs) error {
// FIXME doesn't delete local directories
func Purge(f Fs) error {
if purger, ok := f.(Purger); ok {
err := purger.Purge()
if err != nil {
Stats.Error()
return err
if Config.DryRun {
Debug(f, "Not purging as --dry-run set")
} else {
err := purger.Purge()
if err != nil {
Stats.Error()
return err
}
}
} else {
DeleteFiles(f.List())

fs/version.go Normal file

@@ -0,0 +1,3 @@
package fs
const Version = "v1.03"

googleauth/googleauth.go Normal file

@@ -0,0 +1,138 @@
// Common authentication between Google Drive and Google Cloud Storage
package googleauth
import (
"encoding/json"
"fmt"
"log"
"net/http"
"code.google.com/p/goauth2/oauth"
"github.com/ncw/rclone/fs"
)
// A token cache to save the token in the config file section named
type TokenCache string
// Get the token from the config file - returns an error if it isn't present
func (name TokenCache) Token() (*oauth.Token, error) {
tokenString, err := fs.ConfigFile.GetValue(string(name), "token")
if err != nil {
return nil, err
}
if tokenString == "" {
return nil, fmt.Errorf("Empty token found - please reconfigure")
}
token := new(oauth.Token)
err = json.Unmarshal([]byte(tokenString), token)
if err != nil {
return nil, err
}
return token, nil
}
// Save the token to the config file
//
// This saves the config file if it changes
func (name TokenCache) PutToken(token *oauth.Token) error {
tokenBytes, err := json.Marshal(token)
if err != nil {
return err
}
tokenString := string(tokenBytes)
old := fs.ConfigFile.MustValue(string(name), "token")
if tokenString != old {
fs.ConfigFile.SetValue(string(name), "token", tokenString)
fs.SaveConfig()
}
return nil
}
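A sketch of the cache in use (assumptions: a config section named "gcs" exists and the token value is made up):

package main

import (
	"log"

	"code.google.com/p/goauth2/oauth"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/googleauth"
)

func main() {
	fs.LoadConfig()
	cache := googleauth.TokenCache("gcs") // section name is an assumption
	// Save a made-up token - it is JSON encoded into the config file
	if err := cache.PutToken(&oauth.Token{AccessToken: "ya29.example"}); err != nil {
		log.Fatalf("PutToken failed: %v", err)
	}
	// Read it back - errors if the key is missing or empty
	token, err := cache.Token()
	if err != nil {
		log.Fatalf("Token failed: %v", err)
	}
	log.Printf("cached access token: %q", token.AccessToken)
}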
// Auth contains information to authenticate an app against google services
type Auth struct {
Scope string
DefaultClientId string
DefaultClientSecret string
}
// Makes a new transport using authorisation from the config
//
// Doesn't have a token yet
func (auth *Auth) newTransport(name string) (*oauth.Transport, error) {
clientId := fs.ConfigFile.MustValue(name, "client_id")
if clientId == "" {
clientId = auth.DefaultClientId
}
clientSecret := fs.ConfigFile.MustValue(name, "client_secret")
if clientSecret == "" {
clientSecret = auth.DefaultClientSecret
}
// Settings for authorization.
var config = &oauth.Config{
ClientId: clientId,
ClientSecret: clientSecret,
Scope: auth.Scope,
RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
AuthURL: "https://accounts.google.com/o/oauth2/auth",
TokenURL: "https://accounts.google.com/o/oauth2/token",
TokenCache: TokenCache(name),
}
t := &oauth.Transport{
Config: config,
Transport: http.DefaultTransport,
}
return t, nil
}
// Makes a new transport using authorisation from the config with token
func (auth *Auth) NewTransport(name string) (*oauth.Transport, error) {
t, err := auth.newTransport(name)
if err != nil {
return nil, err
}
// Try to pull the token from the cache; if this fails, we need to get one.
token, err := t.Config.TokenCache.Token()
if err != nil {
return nil, fmt.Errorf("Failed to get token: %s", err)
}
t.Token = token
return t, nil
}
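A sketch of how a backend consumes this, mirroring what the storage code below does (the scope and credentials are placeholders; assumes the usual net/http and googleauth imports):

// Hypothetical example - scope and credentials are placeholders.
var exampleAuth = &googleauth.Auth{
	Scope:               "https://www.googleapis.com/auth/devstorage.read_write",
	DefaultClientId:     "CLIENT_ID.apps.googleusercontent.com",
	DefaultClientSecret: "CLIENT_SECRET",
}

// newClient returns an authorised *http.Client for the config
// section name, or an error if no token has been cached yet.
func newClient(name string) (*http.Client, error) {
	t, err := exampleAuth.NewTransport(name)
	if err != nil {
		return nil, err
	}
	return t.Client(), nil
}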
// Configuration helper - called after the user has put in the defaults
func (auth *Auth) Config(name string) {
// See if already have a token
tokenString := fs.ConfigFile.MustValue(name, "token")
if tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !fs.Confirm() {
return
}
}
// Get a transport
t, err := auth.newTransport(name)
if err != nil {
log.Fatalf("Couldn't make transport: %v", err)
}
// Generate a URL for the user to visit for authorization.
authUrl := t.Config.AuthCodeURL("state")
fmt.Printf("Go to the following link in your browser\n")
fmt.Printf("%s\n", authUrl)
fmt.Printf("Log in, then type paste the token that is returned in the browser here\n")
// Read the code, and exchange it for a token.
fmt.Printf("Enter verification code> ")
authCode := fs.ReadLine()
_, err = t.Exchange(authCode)
if err != nil {
log.Fatalf("Failed to get token: %v", err)
}
}

googlecloudstorage/googlecloudstorage.go Normal file

@@ -0,0 +1,570 @@
// Google Cloud Storage interface
package googlecloudstorage
/*
Notes
Can't set Updated but can set Metadata on object creation
Patch needs full_control not just read_write
FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error
- https://code.google.com/p/google-api-go-client/issues/detail?id=64
*/
import (
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"mime"
"net/http"
"path"
"regexp"
"strings"
"time"
"code.google.com/p/google-api-go-client/googleapi"
"code.google.com/p/google-api-go-client/storage/v1"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/googleauth"
)
const (
rcloneClientId = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "X4Z3ca8xfWDb1Voo-F9a7ZxJ"
RFC3339In = time.RFC3339
RFC3339Out = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 256 // chunk size to read directory listings
)
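RFC3339Out pins the fractional seconds at nine digits so the mtime string written to metadata has a fixed shape; parsing uses the standard time.RFC3339, which accepts any precision. A quick sketch of the round trip:

// Round trip: format with RFC3339Out, parse with RFC3339In
t := time.Date(2011, 12, 25, 12, 59, 59, 123456789, time.UTC)
s := t.Format(RFC3339Out) // "2011-12-25T12:59:59.123456789Z"
back, err := time.Parse(RFC3339In, s)
// err == nil and back.Equal(t) - no precision is lost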
var (
// Description of how to auth for this app
storageAuth = &googleauth.Auth{
Scope: storage.DevstorageFull_controlScope,
DefaultClientId: rcloneClientId,
DefaultClientSecret: rcloneClientSecret,
}
)
// Register with Fs
func init() {
fs.Register(&fs.FsInfo{
Name: "google cloud storage",
NewFs: NewFs,
Config: func(name string) {
storageAuth.Config(name)
},
Options: []fs.Option{{
Name: "client_id",
Help: "Google Application Client Id - leave blank to use rclone's.",
}, {
Name: "client_secret",
Help: "Google Application Client Secret - leave blank to use rclone's.",
}, {
Name: "project_number",
Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
}, {
Value: "bucketOwnerFullControl",
Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
}, {
Value: "bucketOwnerRead",
Help: "Object owner gets OWNER access, and project team owners get READER access.",
}, {
Value: "private",
Help: "Object owner gets OWNER access [default if left blank].",
}, {
Value: "projectPrivate",
Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Object owner gets OWNER access, and all Users get READER access.",
}},
}, {
Name: "bucket_acl",
Help: "Access Control List for new buckets.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
}, {
Value: "private",
Help: "Project team owners get OWNER access [default if left blank].",
}, {
Value: "projectPrivate",
Help: "Project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Project team owners get OWNER access, and all Users get READER access.",
}, {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
}},
}},
})
}
// FsStorage represents a remote storage server
type FsStorage struct {
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
root string // the path we are working on if any
projectNumber string // used for finding buckets
objectAcl string // used when creating new objects
bucketAcl string // used when creating new buckets
}
// FsObjectStorage describes a storage object
//
// Will definitely have info but maybe not meta
type FsObjectStorage struct {
storage *FsStorage // what this object is part of
remote string // The remote path
url string // download path
md5sum string // The MD5Sum of the object
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
}
// ------------------------------------------------------------
// String converts this FsStorage to a string
func (f *FsStorage) String() string {
if f.root == "" {
return fmt.Sprintf("Storage bucket %s", f.bucket)
}
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
}
// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = fmt.Errorf("Couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
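For example (values made up):

bucket, directory, err := parsePath("mybucket/photos/2014/")
// bucket == "mybucket", directory == "photos/2014" (slashes trimmed), err == nil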
// NewFs constructs an FsStorage from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
t, err := storageAuth.NewTransport(name)
if err != nil {
return nil, err
}
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
f := &FsStorage{
bucket: bucket,
root: directory,
projectNumber: fs.ConfigFile.MustValue(name, "project_number"),
objectAcl: fs.ConfigFile.MustValue(name, "object_acl"),
bucketAcl: fs.ConfigFile.MustValue(name, "bucket_acl"),
}
if f.objectAcl == "" {
f.objectAcl = "private"
}
if f.bucketAcl == "" {
f.bucketAcl = "private"
}
// Create a new authorized Google Cloud Storage client.
f.client = t.Client()
f.svc, err = storage.New(f.client)
if err != nil {
return nil, fmt.Errorf("Couldn't create Google Cloud Storage client: %s", err)
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
_, err = f.svc.Objects.Get(bucket, directory).Do()
if err == nil {
remote := path.Base(directory)
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
}
}
return f, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsStorage) NewFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
o := &FsObjectStorage{
storage: f,
remote: remote,
}
if info != nil {
o.setMetaData(info)
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
// logged already FsDebug("Failed to read info: %s", err)
return nil
}
}
return o
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsStorage) NewFsObject(remote string) fs.Object {
return f.NewFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
list := f.svc.Objects.List(f.bucket).Prefix(f.root).MaxResults(listChunks)
if directories {
list = list.Delimiter("/")
}
rootLength := len(f.root)
for {
objects, err := list.Do()
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
return
}
for _, object := range objects.Items {
if directories && !strings.HasSuffix(object.Name, "/") {
continue
}
if !strings.HasPrefix(object.Name, f.root) {
fs.Log(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
fn(remote, object)
}
if objects.NextPageToken == "" {
break
}
list.PageToken(objects.NextPageToken)
}
}
// Walk the path returning a channel of FsObjects
func (f *FsStorage) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
if f.bucket == "" {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
} else {
// List the objects
go func() {
defer close(out)
f.list(false, func(remote string, object *storage.Object) {
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
}()
}
return out
}
// Lists the buckets
func (f *FsStorage) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
if f.bucket == "" {
// List the buckets
go func() {
defer close(out)
if f.projectNumber == "" {
fs.Stats.Error()
fs.Log(f, "Can't list buckets without project number")
return
}
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
for {
buckets, err := listBuckets.Do()
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list buckets: %v", err)
break
} else {
for _, bucket := range buckets.Items {
out <- &fs.Dir{
Name: bucket.Name,
Bytes: 0,
Count: 0,
}
}
}
if buckets.NextPageToken == "" {
break
}
listBuckets.PageToken(buckets.NextPageToken)
}
}()
} else {
// List the directories in the path in the bucket
go func() {
defer close(out)
f.list(true, func(remote string, object *storage.Object) {
out <- &fs.Dir{
Name: remote,
Bytes: int64(object.Size),
Count: 0,
}
})
}()
}
return out
}
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsStorage) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
fs := &FsObjectStorage{storage: f, remote: remote}
return fs, fs.Update(in, modTime, size)
}
// Mkdir creates the bucket if it doesn't exist
func (f *FsStorage) Mkdir() error {
_, err := f.svc.Buckets.Get(f.bucket).Do()
if err == nil {
// Bucket already exists
return nil
}
if f.projectNumber == "" {
return fmt.Errorf("Can't make bucket without project number")
}
bucket := storage.Bucket{
Name: f.bucket,
}
_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketAcl).Do()
return err
}
// Rmdir deletes the bucket
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *FsStorage) Rmdir() error {
return f.svc.Buckets.Delete(f.bucket).Do()
}
// Return the precision
func (fs *FsStorage) Precision() time.Duration {
return time.Nanosecond
}
// ------------------------------------------------------------
// Return the parent Fs
func (o *FsObjectStorage) Fs() fs.Fs {
return o.storage
}
// Return a string version
func (o *FsObjectStorage) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Return the remote path
func (o *FsObjectStorage) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectStorage) Md5sum() (string, error) {
return o.md5sum, nil
}
// Size returns the size of an object in bytes
func (o *FsObjectStorage) Size() int64 {
return o.bytes
}
// setMetaData sets the fs data from a storage.Object
func (o *FsObjectStorage) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)
// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
if err != nil {
fs.Log(o, "Bad MD5 decode: %v", err)
} else {
o.md5sum = hex.EncodeToString(md5sumData)
}
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(RFC3339In, mtimeString)
if err == nil {
o.modTime = modTime
return
} else {
fs.Debug(o, "Failed to read mtime from metadata: %s", err)
}
}
// Fallback to the Updated time
modTime, err := time.Parse(RFC3339In, info.Updated)
if err != nil {
fs.Log(o, "Bad time decode: %v", err)
} else {
o.modTime = modTime
}
}
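The server hands back the MD5 base64 encoded; rclone stores it as lowercase hex so it matches md5sum output. The conversion in isolation (the hash shown is md5 of "hello world", the same value that appears in the rclonetest expectations below):

md5Bytes, err := base64.StdEncoding.DecodeString("XrY7u+Ae7tCTyyK7j1rNww==")
if err == nil {
	fmt.Println(hex.EncodeToString(md5Bytes)) // 5eb63bbbe01eeed093cb22bb8f5acdc3
}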
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *FsObjectStorage) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
object, err := o.storage.svc.Objects.Get(o.storage.bucket, o.storage.root+o.remote).Do()
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
}
o.setMetaData(object)
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectStorage) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
// fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
return o.modTime
}
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(RFC3339Out)
return metadata
}
// Sets the modification time of the local fs object
func (o *FsObjectStorage) SetModTime(modTime time.Time) {
// This only adds metadata so will preserve other metadata
object := storage.Object{
Bucket: o.storage.bucket,
Name: o.storage.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
_, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
}
}
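A sketch of the expected round trip (in-package, assuming o is a fully read *FsObjectStorage): since SetModTime patches only the Metadata map, a fresh read recovers exactly the time that was set.

when := time.Date(2001, 2, 3, 4, 5, 6, 499999999, time.UTC)
o.SetModTime(when)      // patches Metadata["mtime"], other metadata kept
o.modTime = time.Time{} // force readMetaData to fetch fresh metadata
// o.ModTime().Equal(when) should now hold - Precision() is time.Nanosecond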
// Is this object storable
func (o *FsObjectStorage) Storable() bool {
return true
}
// Open an object for read
func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
// This is slightly complicated by Go here insisting on
// decoding the %2F in URLs into / which is legal in http, but
// unfortunately not what the storage server wants.
//
// So first encode all the % into their encoded form
// URL will decode them giving our original escaped string
url := strings.Replace(o.url, "%", "%25", -1)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
// SetOpaque sets Opaque such that HTTP requests to it don't
// alter any hex-escaped characters
googleapi.SetOpaque(req.URL)
req.Header.Set("User-Agent", fs.UserAgent)
res, err := o.storage.client.Do(req)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
res.Body.Close()
return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
}
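The escaping issue in miniature (URL made up; assumes net/url and fmt): Go's URL parser decodes %2F back into /, which would change the object name the server sees - doubling the % first means the decode merely restores the original text.

u, _ := url.Parse("https://storage.example.com/o/sub%2Fdir")
fmt.Println(u.Path) // "/o/sub/dir" - the %2F is gone
u2, _ := url.Parse("https://storage.example.com/o/sub%252Fdir")
fmt.Println(u2.Path) // "/o/sub%2Fdir" - survives intact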
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectStorage) Update(in io.Reader, modTime time.Time, size int64) error {
// Guess the content type
contentType := mime.TypeByExtension(path.Ext(o.remote))
if contentType == "" {
contentType = "application/octet-stream"
}
object := storage.Object{
Bucket: o.storage.bucket,
Name: o.storage.root + o.remote,
ContentType: contentType,
Size: uint64(size),
Updated: modTime.Format(RFC3339Out), // Doesn't get set
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.storage.svc.Objects.Insert(o.storage.bucket, &object).Media(in).Name(object.Name).PredefinedAcl(o.storage.objectAcl).Do()
if err != nil {
return err
}
// Set the metadata for the new object while we have it
o.setMetaData(newObject)
return nil
}
// Remove an object
func (o *FsObjectStorage) Remove() error {
return o.storage.svc.Objects.Delete(o.storage.bucket, o.storage.root+o.remote).Do()
}
// Check the interfaces are satisfied
var _ fs.Fs = &FsStorage{}
var _ fs.Object = &FsObjectStorage{}

BIN graphics/rclone-256x256.png Normal file (binary, 51 KiB, not shown)

BIN graphics/rclone-64x64.png Normal file (binary, 7.7 KiB, not shown)

local/local.go

@@ -3,7 +3,9 @@ package local
import (
"crypto/md5"
"encoding/hex"
"fmt"
"hash"
"io"
"io/ioutil"
"log"
@@ -36,7 +38,8 @@ type FsObjectLocal struct {
local fs.Fs // The Fs this object is part of
remote string // The remote path
path string // The local path
info os.FileInfo // Interface for file info
info os.FileInfo // Interface for file info (always present)
md5sum string // the md5sum of the object or "" if not calculated
}
// ------------------------------------------------------------
@@ -172,9 +175,13 @@ func (f *FsLocal) ListDir() fs.DirChan {
// Puts the FsObject to the local filesystem
func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
dstPath := filepath.Join(f.root, remote)
// Temporary FsObject under construction
fs := &FsObjectLocal{local: f, remote: remote, path: dstPath}
return fs, fs.Update(in, modTime, size)
// Temporary FsObject under construction - info filled in by Update()
o := &FsObjectLocal{local: f, remote: remote, path: dstPath}
err := o.Update(in, modTime, size)
if err != nil {
return nil, err
}
return o, nil
}
// Mkdir creates the directory if it doesn't exist
@@ -246,6 +253,15 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
return
}
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsLocal) Purge() error {
return os.RemoveAll(f.root)
}
// ------------------------------------------------------------
// Return the parent Fs
@@ -268,21 +284,30 @@ func (o *FsObjectLocal) Remote() string {
// Md5sum calculates the Md5sum of a file returning a lowercase hex string
func (o *FsObjectLocal) Md5sum() (string, error) {
if o.md5sum != "" {
return o.md5sum, nil
}
in, err := os.Open(o.path)
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to open: %s", err)
return "", err
}
defer in.Close() // FIXME ignoring error
hash := md5.New()
_, err = io.Copy(hash, in)
closeErr := in.Close()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read: %s", err)
return "", err
}
return fmt.Sprintf("%x", hash.Sum(nil)), nil
if closeErr != nil {
fs.Stats.Error()
fs.Log(o, "Failed to close: %s", closeErr)
return "", closeErr
}
o.md5sum = hex.EncodeToString(hash.Sum(nil))
return o.md5sum, nil
}
// Size returns the size of an object in bytes
@@ -316,9 +341,47 @@ func (o *FsObjectLocal) Storable() bool {
return true
}
// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
// object that is read
type localOpenFile struct {
o *FsObjectLocal // object that is open
in io.ReadCloser // handle we are wrapping
hash hash.Hash // currently accumulating MD5
}
// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
n, err = file.in.Read(p)
if n > 0 {
// Hash routines never return an error
_, _ = file.hash.Write(p[:n])
}
return
}
// Close the object and update the md5sum
func (file *localOpenFile) Close() (err error) {
err = file.in.Close()
if err == nil {
file.o.md5sum = hex.EncodeToString(file.hash.Sum(nil))
} else {
file.o.md5sum = ""
}
return err
}
// Open an object for read
func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
in, err = os.Open(o.path)
if err != nil {
return
}
// Update the md5sum as we go along
in = &localOpenFile{
o: o,
in: in,
hash: md5.New(),
}
return
}
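A useful consequence (in-package sketch, assuming o is a *FsObjectLocal): a full read through Open leaves the md5sum cached on the object, so the check in Copy() gets it without reading the file twice.

in, err := o.Open()
if err == nil {
	_, readErr := io.Copy(ioutil.Discard, in) // hash accumulates as we read
	closeErr := in.Close()                    // Close stores the sum on success
	if readErr == nil && closeErr == nil {
		sum, _ := o.Md5sum() // answered from the cache, no second read
		fmt.Println(sum)
	}
}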
@@ -335,6 +398,10 @@ func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) erro
return err
}
// Calculate the md5sum of the object we are reading as we go along
hash := md5.New()
in = io.TeeReader(in, hash)
_, err = io.Copy(out, in)
outErr := out.Close()
if err != nil {
@@ -344,9 +411,14 @@ func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) erro
return outErr
}
// All successful so update the md5sum
o.md5sum = hex.EncodeToString(hash.Sum(nil))
// Set the mtime
o.SetModTime(modTime)
return nil
// ReRead info now that we have finished
return o.lstat()
}
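The TeeReader trick in isolation (generic sketch, not rclone API; the sum is md5 of "hello world" as used in the tests below):

hash := md5.New()
in := io.TeeReader(strings.NewReader("hello world"), hash)
_, _ = io.Copy(ioutil.Discard, in) // every byte read also goes to the hash
fmt.Printf("%x\n", hash.Sum(nil))  // 5eb63bbbe01eeed093cb22bb8f5acdc3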
// Stat a FsObject into info
@@ -363,4 +435,5 @@ func (o *FsObjectLocal) Remove() error {
// Check the interfaces are satisfied
var _ fs.Fs = &FsLocal{}
var _ fs.Purger = &FsLocal{}
var _ fs.Object = &FsObjectLocal{}

notes.txt

@@ -15,6 +15,14 @@ Todo
* Make a fs.Errorf and count errors and log them at a different level
* Add max object size to fs metadata - 5GB for swift, infinite for local, ? for s3
* tie into -max-size flag
* FIXME Make NewFs return err.IsAnObject so the LimitedFs
creation can go in common code? Or try for as much as possible?
* FIXME Account all the transactions (ls etc) using a different
Roundtripper wrapper which wraps the transactions?
* FIXME write tests for local file system
* FIXME implement tests for single file operations in rclonetest
* Need to make directory objects otherwise can't upload an empty directory
* Or could upload empty directories only?
Ideas
* could do encryption - put IV into metadata?
@@ -23,16 +31,9 @@ Ideas
* support
* sftp
* scp
* Google cloud storage: https://developers.google.com/storage/
* rsync over ssh
* dropbox: https://github.com/nickoneill/go-dropbox (no MD5s)
* control times sync (which is slow) with -a --archive flag?
Need to make directory objects otherwise can't upload an empty directory
* Or could upload empty directories only?
* Can't purge a local filesystem because it leaves the directories behind
Copying a single file? Or maybe with a glob pattern? Could do with LimitedFs
* control times sync (which is slow with some remotes) with -a --archive flag?
* Copy a glob pattern - could do with LimitedFs
s3
* Can maybe set last modified?
@@ -45,20 +46,13 @@ Bugs
* When doing copy it recurses the whole of the destination FS which isn't necessary
Making a release
* go build ./...
* cd rclonetest
* go build
* ./rclonetest memstore:
* ./rclonetest s3:
* ./rclonetest drive2:
* ./rclonetest /tmp/z
* cd ..
* make test
* make tag
* edit README.md Changelog
* git commit version.go rclonetest/version.go README.md docs/content/downloads.md
* edit README.md
* git commit fs/version.go README.md docs/content/downloads.md
* make retag
* . ~/bin/go-cross
* make cross
* make upload
* make upload_website
* git push --tags
* git push --tags origin master

rclone.go

@@ -17,6 +17,8 @@ import (
"github.com/ncw/rclone/fs"
// Active file systems
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
@@ -92,7 +94,7 @@ var Commands = []Command{
Name: "ls",
ArgsHelp: "[remote://path]",
Help: `
List all the objects in the path.`,
List all the objects in the path with size and path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.List(fdst)
if err != nil {
@@ -116,6 +118,34 @@ var Commands = []Command{
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "lsl",
ArgsHelp: "[remote://path]",
Help: `
List all the objects in the path with modification time, size and path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.ListLong(fdst)
if err != nil {
log.Fatalf("Failed to list long: %v", err)
}
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "md5sum",
ArgsHelp: "[remote://path]",
Help: `
Produces an md5sum file for all the objects in the path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Md5sum(fdst)
if err != nil {
log.Fatalf("Failed to list: %v", err)
}
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "mkdir",
ArgsHelp: "remote://path",
@@ -200,7 +230,7 @@ Syntax: [options] subcommand <parameters> <parameters...>
Subcommands:
`, Version)
`, fs.Version)
for i := range Commands {
cmd := &Commands[i]
fmt.Fprintf(os.Stderr, " %s %s\n", cmd.Name, cmd.ArgsHelp)
@@ -301,7 +331,7 @@ func StartStats() {
func main() {
ParseFlags()
if *version {
fmt.Printf("rclone %s\n", Version)
fmt.Printf("rclone %s\n", fs.Version)
os.Exit(0)
}
command, args := ParseCommand()

rclonetest/rclonetest.go

@@ -18,6 +18,8 @@ import (
// Active file systems
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
@@ -27,6 +29,7 @@ import (
var (
localName, remoteName string
version = pflag.BoolP("version", "V", false, "Print the version number")
subDir = pflag.BoolP("subdir", "S", false, "Test with a sub directory")
)
// Represents an item for checking
@@ -115,7 +118,12 @@ func Time(timeString string) time.Time {
func WriteFile(filePath, content string, t time.Time) {
filePath = path.Join(localName, filePath)
err := ioutil.WriteFile(filePath, []byte(content), 0600)
dirPath := path.Dir(filePath)
err := os.MkdirAll(dirPath, 0770)
if err != nil {
log.Fatalf("Failed to make directories %q: %v", dirPath, err)
}
err = ioutil.WriteFile(filePath, []byte(content), 0600)
if err != nil {
log.Fatalf("Failed to write file %q: %v", filePath, err)
}
@@ -145,25 +153,66 @@ func TestMkdir(flocal, fremote fs.Fs) {
CheckListing(fremote, items)
}
var t1 = Time("2001-02-03T04:05:06.999999999Z")
var t1 = Time("2001-02-03T04:05:06.499999999Z")
var t2 = Time("2011-12-25T12:59:59.123456789Z")
var t3 = Time("2011-12-30T12:59:59.000000000Z")
func TestCopy(flocal, fremote fs.Fs) {
WriteFile("empty space", "", t1)
WriteFile("sub dir/hello world", "hello world", t1)
// Check dry run is working
log.Printf("Copy with --dry-run")
fs.Config.DryRun = true
err := fs.Sync(fremote, flocal, false)
fs.Config.DryRun = false
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
items := []Item{
{Path: "empty space", Size: 0, ModTime: t1, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
CheckListing(flocal, items)
CheckListing(fremote, []Item{})
// Now without dry run
log.Printf("Copy")
err = fs.Sync(fremote, flocal, false)
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// Now delete the local file and download it
err = os.Remove(localName + "/sub dir/hello world")
if err != nil {
log.Fatalf("Remove failed: %v", err)
}
CheckListing(flocal, []Item{})
CheckListing(fremote, items)
log.Printf("Copy - redownload")
err = fs.Sync(flocal, fremote, false)
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// Clean the directory
cleanTempDir()
}
func TestSync(flocal, fremote fs.Fs) {
WriteFile("empty space", "", t1)
log.Printf("Sync after changing file modtime only")
err := os.Chtimes(localName+"/empty space", t2, t2)
if err != nil {
@@ -182,14 +231,14 @@ func TestSync(flocal, fremote fs.Fs) {
// ------------------------------------------------------------
log.Printf("Sync after adding a file")
WriteFile("potato", "------------------------------------------------------------", t1)
WriteFile("potato", "------------------------------------------------------------", t3)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
{Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
@@ -197,36 +246,66 @@ func TestSync(flocal, fremote fs.Fs) {
// ------------------------------------------------------------
log.Printf("Sync after changing a file's size only")
WriteFile("potato", "smaller but same date", t1)
WriteFile("potato", "smaller but same date", t3)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t1, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after removing a file")
err = os.Remove(localName + "/potato")
if err != nil {
log.Fatalf("Remove failed: %v", err)
}
log.Printf("Sync after changing a file's contents, modtime but not length")
WriteFile("potato", "SMALLER BUT SAME DATE", t2)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after removing a file and adding a file --dry-run")
WriteFile("potato2", "------------------------------------------------------------", t1)
err = os.Remove(localName + "/potato")
if err != nil {
log.Fatalf("Remove failed: %v", err)
}
fs.Config.DryRun = true
err = fs.Sync(fremote, flocal, true)
fs.Config.DryRun = false
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
before := []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
CheckListing(flocal, items)
CheckListing(fremote, before)
log.Printf("Sync after removing a file and adding a file")
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
CheckListing(flocal, items)
CheckListing(fremote, items)
}
func TestLs(flocal, fremote fs.Fs) {
@@ -243,11 +322,19 @@ func TestLsd(flocal, fremote fs.Fs) {
func TestCheck(flocal, fremote fs.Fs) {
}
func TestPurge(flocal, fremote fs.Fs) {
func TestPurge(fremote fs.Fs) {
err := fs.Purge(fremote)
if err != nil {
log.Fatalf("Purge failed: %v", err)
}
unexpected := 0
for obj := range fremote.List() {
unexpected++
log.Printf("Found unexpected item %s", obj.Remote())
}
if unexpected != 0 {
log.Fatalf("exiting as found %d unexpected items", unexpected)
}
}
func TestRmdir(flocal, fremote fs.Fs) {
@@ -267,7 +354,7 @@ directory under it and perform tests on it, deleting it at the end.
Options:
`, Version)
`, fs.Version)
pflag.PrintDefaults()
}
@@ -284,7 +371,7 @@ func main() {
pflag.Usage = syntaxError
pflag.Parse()
if *version {
fmt.Printf("rclonetest %s\n", Version)
fmt.Printf("rclonetest %s\n", fs.Version)
os.Exit(0)
}
fs.LoadConfig()
@@ -301,6 +388,15 @@ func main() {
remoteName += "/"
}
remoteName += RandomString(32)
var parentRemote fs.Fs
if *subDir {
var err error
parentRemote, err = fs.NewFs(remoteName)
if err != nil {
log.Fatalf("Failed to make parent %q: %v", remoteName, err)
}
remoteName += "/" + RandomString(8)
}
log.Printf("Testing with remote %q", remoteName)
var err error
localName, err = ioutil.TempDir("", "rclone")
@@ -326,9 +422,13 @@ func main() {
TestLs(flocal, fremote)
TestLsd(flocal, fremote)
TestCheck(flocal, fremote)
TestPurge(flocal, fremote)
TestPurge(fremote)
//TestRmdir(flocal, fremote)
if parentRemote != nil {
TestPurge(parentRemote)
}
cleanTempDir()
log.Printf("Tests OK")
}

rclonetest/test.sh Executable file

@@ -0,0 +1,25 @@
#!/bin/bash
go install
REMOTES="
memstore:
s3:
drive2:
gcs:
dropbox:
/tmp/z
"
function test_remote {
args=$@
rclonetest $args || {
echo "*** rclonetest $args FAILED ***"
exit 1
}
}
for remote in $REMOTES; do
test_remote $remote
test_remote --subdir $remote
done

rclonetest/version.go

@@ -1,3 +0,0 @@
package main
const Version = "v0.98"

s3/s3.go

@@ -514,6 +514,12 @@ func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
}
_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, contentType, o.s3.perm, headers)
if err != nil {
return err
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData()
return err
}

swift/swift.go

@@ -107,9 +107,10 @@ func swiftConnection(name string) (*swift.Connection, error) {
return nil, errors.New("auth not found")
}
c := &swift.Connection{
UserName: userName,
ApiKey: apiKey,
AuthUrl: authUrl,
UserName: userName,
ApiKey: apiKey,
AuthUrl: authUrl,
UserAgent: fs.UserAgent,
}
err := c.Authenticate()
if err != nil {
@@ -408,6 +409,12 @@ func (o *FsObjectSwift) Update(in io.Reader, modTime time.Time, size int64) erro
m := swift.Metadata{}
m.SetModTime(modTime)
_, err := o.swift.c.ObjectPut(o.swift.container, o.swift.root+o.remote, in, true, "", "", m.ObjectHeaders())
if err != nil {
return err
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData()
return err
}

version.go

@@ -1,3 +0,0 @@
package main
const Version = "v0.98"