Compare commits

136 Commits
v0.99 ... v1.16

Author SHA1 Message Date
Nick Craig-Wood
539ad44757 Version v1.16 2015-06-09 18:00:33 +01:00
Nick Craig-Wood
74994a2ec1 Fix uploading big files which was causing timeouts or panics
The symptom was one of these two, on upload of files only:
  * panic: d.nx != 0 in crypto/md5.(*digest).checkSum
  * read tcp: i/o timeout

It turned out to be a combination of two upstream bugs

  * 5a2187309e
  * https://groups.google.com/forum/#!topic/golang-dev/0Nl6k5Sj6UU

This commit contains a work-around for the second problem; I've fixed
the first and had the change accepted upstream.
2015-06-09 17:32:45 +01:00
Nick Craig-Wood
97dced6a0b Don't check md5sum after download with --size-only - fixes #75 2015-06-09 13:18:40 +01:00
Nick Craig-Wood
e04acb09ce Version v1.15 2015-06-06 15:45:00 +01:00
Nick Craig-Wood
87ed7fc932 Document rclone's limitations with directories - fixes #69 2015-06-06 14:33:08 +01:00
Nick Craig-Wood
90744301d3 Fix package docs so they appear in godoc correctly 2015-06-06 14:24:30 +01:00
Nick Craig-Wood
bf4879f57f Expand docs and remove duplicated information 2015-06-06 14:17:14 +01:00
Nick Craig-Wood
e22b445cff Implement --size-only flag to sync on size not checksum & modtime - fixes #75 2015-06-06 08:49:01 +01:00
Nick Craig-Wood
5ab7970e18 dropbox: update docs about case insensitivity - see #53 2015-06-06 08:44:09 +01:00
Nick Craig-Wood
e984eeedc4 Add Alex Couper to contributors 2015-06-05 20:12:56 +01:00
Nick Craig-Wood
968b5a0984 Update notes 2015-06-05 20:00:36 +01:00
Alex Couper
7af1282375 Add --checksum flag to only discard transfers by MD5SUM - fixes #61
Useful for copying between backends where checksum fetching is fast,
ie any of s3, swift, drive or googlecloudstorage.
2015-06-05 19:46:03 +01:00
Nick Craig-Wood
d9fcc32f70 Version v1.14 2015-05-21 20:13:40 +01:00
Nick Craig-Wood
870a9fc3b2 local: fix encoding of non utf-8 file names - fixes #66 2015-05-21 18:40:16 +01:00
Nick Craig-Wood
8e3703abeb drive: docs about rate limiting 2015-05-21 18:39:46 +01:00
Nick Craig-Wood
ba81277bbe google cloud storage: Fix compile after API change in "google.golang.org/api/storage/v1"
The breaking change is: google-api-go-generator: remove underscores from identifiers.
2015-05-19 08:18:26 +01:00
Nick Craig-Wood
88293a4b8a Version v1.13 2015-05-10 12:39:06 +01:00
Nick Craig-Wood
981104519e Revise documentation (especially sync) - fixes #39 2015-05-10 12:17:24 +01:00
Nick Craig-Wood
1d254a3674 Implement --timeout and --conntimeout - fixes #54
NB dropbox still to do
2015-05-10 11:29:55 +01:00
Nick Craig-Wood
f88d171afd s3: ignore etags from multipart uploads which aren't md5sums - fixes #56 2015-05-10 11:29:55 +01:00
Nick Craig-Wood
ba2091725e Version v1.12 2015-03-15 15:55:38 +00:00
Nick Craig-Wood
7c120b8bc5 drive: add --drive-chunk-size and --drive-upload-cutoff parameters 2015-03-15 15:27:55 +00:00
Nick Craig-Wood
5cc5429f99 drive: switch to insert from update when a failed copy deletes the upload 2015-03-15 15:27:55 +00:00
Nick Craig-Wood
09d71239b6 Make file size render more neatly and prevent it from being < 0 2015-03-15 15:27:55 +00:00
Nick Craig-Wood
c643e4585e core: Log duplicate files if they are detected 2015-03-15 15:27:55 +00:00
Nick Craig-Wood
873db29391 Log all objects more informatively 2015-03-15 15:27:55 +00:00
Nick Craig-Wood
81a933ae38 drive: Use chunked upload for files - fixes #33 2015-03-15 15:27:55 +00:00
Nick Craig-Wood
ecb3c7bcef drive, googlecloudstorage: remove SeekWrapper after googleapi fix 2015-03-04 20:47:59 +00:00
Nick Craig-Wood
80000b904c Version v1.11 2015-03-04 17:59:31 +00:00
Nick Craig-Wood
c47c9cd440 swift: add region parameter - fixes #38 2015-03-04 17:09:53 +00:00
Nick Craig-Wood
b4a0941d4c In remote paths, change native directory separators to / - fixes #37 2015-03-02 17:04:34 +00:00
Nick Craig-Wood
c03d6a1ec3 drive: fix crash on failed to update remote mtime - fixes #36 2015-03-02 09:25:33 +00:00
Nick Craig-Wood
46d39ebaf7 Factor Mime Type guessing into fs.MimeType() 2015-03-02 09:21:15 +00:00
Nick Craig-Wood
fe68737268 Fix niggles found by go vet 2015-02-28 15:35:54 +00:00
Nick Craig-Wood
2360bf907a Add synchronization to list output to stop corruptions - fixes #29 2015-02-28 15:30:40 +00:00
Nick Craig-Wood
aa093e991e Ensure all stats/log messages to go stderr - fixes #30 2015-02-28 14:39:00 +00:00
Nick Craig-Wood
a5974999eb Update docs - fixes #32 2015-02-28 14:15:47 +00:00
Nick Craig-Wood
24a6ff54c2 Add --log-file flag to log everything (including panics) to file 2015-02-28 08:10:20 +00:00
Nick Craig-Wood
e89ea3360e Make it possible to disable stats printing with --stats=0 2015-02-27 15:22:26 +00:00
Nick Craig-Wood
85f8552c4d Tidy logging 2015-02-27 15:22:05 +00:00
Nick Craig-Wood
a287e3ced7 Implement --bwlimit to limit data transfer bandwidth 2015-02-27 15:03:47 +00:00
Nick Craig-Wood
8e4d8d13b8 drive: rename internal api 2015-02-20 09:51:07 +00:00
Nick Craig-Wood
cf208ad21b Version v1.10 2015-02-12 18:00:20 +00:00
Nick Craig-Wood
0faed16899 s3: list an unlimited number of items - fixes #22 2015-02-10 17:58:29 +00:00
Nick Craig-Wood
8d1c0ad07c Fix config loop - fixes #25 2015-02-10 16:48:04 +00:00
Nick Craig-Wood
165e89c266 Version v1.09 2015-02-07 22:44:23 +00:00
Nick Craig-Wood
b4e19cfd62 windows: make tests work properly 2015-02-07 22:32:51 +00:00
Nick Craig-Wood
20ad96f3cd windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
This was done by stopping the user configuring single letter remotes
and making sure we don't treat single letter remotes as a remote name,
but as a drive letter.
2015-02-07 22:32:51 +00:00
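A minimal sketch in go of that rule, assuming a simplified path syntax (rclone's real parser handles more cases): a single character before the colon is taken as a drive letter, anything longer as a remote name.

```
package main

import (
	"fmt"
	"strings"
)

// splitRemote decides whether "X:path" names a remote or a local
// Windows path: a single-letter prefix such as "C:" is treated as a
// drive letter, not a remote name.
func splitRemote(path string) (remote, rest string, isLocal bool) {
	i := strings.Index(path, ":")
	if i <= 1 { // no colon, or a one-letter prefix like "C:"
		return "", path, true
	}
	return path[:i], path[i+1:], false
}

func main() {
	fmt.Println(splitRemote(`C:\backup`))    // "" "C:\backup" true
	fmt.Println(splitRemote("drive:backup")) // "drive" "backup" false
}
```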
Nick Craig-Wood
d64a37772f local: Fix directory separators on Windows - fixes #24 2015-02-07 22:32:51 +00:00
Nick Craig-Wood
5fb6f94579 drive: fix rate limit exceeded errors - fixes #20
This is done by pacing the requests to drive and backing them off
using an exponential increase.  Put and Modify operations can now be
retried also.
2015-02-07 22:32:51 +00:00
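A minimal sketch of the pacing idea, with a hypothetical shouldRetry predicate standing in for rclone's rate limit detection:

```
package main

import (
	"errors"
	"fmt"
	"time"
)

// callWithBackoff retries fn, sleeping between attempts and doubling
// the sleep each time the error is retryable (eg a 403 rate limit).
func callWithBackoff(fn func() error, shouldRetry func(error) bool, tries int) error {
	sleep := 20 * time.Millisecond
	var err error
	for i := 0; i < tries; i++ {
		err = fn()
		if err == nil || !shouldRetry(err) {
			return err
		}
		time.Sleep(sleep)
		sleep *= 2 // exponential increase
	}
	return err
}

var errRateLimit = errors.New("googleapi: Error 403: Rate Limit Exceeded")

func main() {
	attempts := 0
	fn := func() error {
		attempts++
		if attempts < 3 {
			return errRateLimit
		}
		return nil
	}
	err := callWithBackoff(fn, func(err error) bool { return errors.Is(err, errRateLimit) }, 5)
	fmt.Println(attempts, err) // 3 <nil>
}
```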
Nick Craig-Wood
20535348db Update docs to remove obsolete bug - fixes #21 2015-02-07 22:32:51 +00:00
Nick Craig-Wood
3d83a265c5 Update notes 2015-02-05 22:44:02 +00:00
Nick Craig-Wood
18a8a61cc5 Release v1.08 2015-02-04 22:31:56 +00:00
Nick Craig-Wood
1758621a51 drive: fix subdirectory listing to not list entire drive - fixes #23
This was causing inexplicably slow transfers on subdirectories of
drives with lots of files.
2015-02-04 22:22:03 +00:00
Nick Craig-Wood
5710247bf6 drive: Fix SetModTime 2015-01-04 23:19:59 +00:00
Nick Craig-Wood
78b03929b7 Fix ModTime test 2015-01-04 16:57:55 +00:00
Nick Craig-Wood
492362ec7d Catch nil from List() in tests 2015-01-04 16:23:22 +00:00
Nick Craig-Wood
51b24a1dc6 dropbox: adapt code to recent library changes 2014-12-23 13:55:22 +00:00
Nick Craig-Wood
cfdb48c864 Version v1.07 2014-12-23 11:26:32 +00:00
Nick Craig-Wood
14567952b3 google cloud storage: Fix memory leak - fixes #17
This was the same problem as issue #5 (which affected google drive)
2014-12-23 11:03:34 +00:00
Nick Craig-Wood
2b052671e2 swift: Add docs for tenant 2014-12-12 20:38:35 +00:00
Nick Craig-Wood
439a126af6 Version v1.06 2014-12-12 20:13:03 +00:00
Nick Craig-Wood
0fb35f081a Use new location of Google API package - fixes #16 2014-12-12 20:02:08 +00:00
Nick Craig-Wood
9ba25c7219 Test with go 1.4 too 2014-12-12 19:27:14 +00:00
Nick Craig-Wood
af9c447146 Fix "Couldn't find home directory" on OSX - fixes #15 2014-12-12 19:18:23 +00:00
Nick Craig-Wood
ee6b39aa6c Add tenant parameter for swift - fixes #13 2014-12-12 15:26:08 +00:00
Nick Craig-Wood
839133c5e1 Version v1.05 2014-08-09 17:22:17 +01:00
Nick Craig-Wood
f4eb48e531 Fix test incantation 2014-08-09 17:18:17 +01:00
Nick Craig-Wood
18439cf2d7 Move rclonetest into go tests for fs module 2014-08-03 11:18:25 +01:00
Nick Craig-Wood
d3c16608e4 Test Listing the Root of each Fs 2014-07-31 23:20:39 +01:00
Nick Craig-Wood
3e27ff1b95 Add Root List test and fs.Limited tests for single files 2014-07-31 21:35:29 +01:00
Nick Craig-Wood
ff91698fb5 Skip tests if test remote not configured 2014-07-31 08:51:39 +01:00
Nick Craig-Wood
c389616657 all: make private functions / variables / constant which shouldn't be public 2014-07-29 17:50:07 +01:00
Nick Craig-Wood
442578ca25 drive: reset root directory on Rmdir and Purge 2014-07-29 17:32:06 +01:00
Nick Craig-Wood
0b51d6221a s3: make reading metadata more reliable to work around eventual consistency problems 2014-07-29 17:32:06 +01:00
Nick Craig-Wood
2f9f9afac2 fs: Document that Purger returns error on empty directory, test and fix 2014-07-29 17:18:22 +01:00
Nick Craig-Wood
9711a5d647 google cloud storage: re-read metadata in SetModTime 2014-07-29 17:18:22 +01:00
Nick Craig-Wood
cc679aa714 google cloud storage: fix ListDir on subdirectory 2014-07-29 17:18:22 +01:00
Nick Craig-Wood
457ef2c190 Automatically generate the tests files for each Fs 2014-07-29 17:18:22 +01:00
Nick Craig-Wood
17ffb0855f Fixes after running errcheck 2014-07-25 18:19:49 +01:00
Nick Craig-Wood
125fc8f1f0 s3: strip trailing / from ListDir() 2014-07-24 23:13:33 +01:00
Nick Craig-Wood
1660903aa2 local: fix unit tests
  * Change log.Printf into fs.Log
  * Re-read metadata on SetModtime
2014-07-24 23:13:33 +01:00
Nick Craig-Wood
b013c58537 swift: return directories without / in ListDir 2014-07-24 23:13:33 +01:00
Nick Craig-Wood
a5b0d88608 Make tests for each Fs
Factor tests out of rclonetest
2014-07-24 23:13:32 +01:00
Nick Craig-Wood
02d50f8c6e local: remove annoying debug message 2014-07-22 23:06:01 +01:00
Nick Craig-Wood
e09ef62d5b core: Fix race detected by go race detector 2014-07-22 23:03:14 +01:00
Nick Craig-Wood
a75bc0703f Version 1.04 2014-07-21 21:32:37 +01:00
Nick Craig-Wood
80ecea82e8 google cloud storage: Fix crash on Update error - fixes #9 2014-07-21 21:25:46 +01:00
Nick Craig-Wood
54cd46372a Version 1.03 2014-07-20 11:28:50 +01:00
Nick Craig-Wood
282cba20a0 swift, s3, dropbox: fix metadata read on Update()
This was causing changed files to be marked as corrupted on upload
2014-07-20 11:23:05 +01:00
Nick Craig-Wood
2479ce2c8e dropbox: go1.1 compatibility 2014-07-19 15:48:40 +01:00
Nick Craig-Wood
9aa4b6bd9b Version 1.02 2014-07-19 13:24:48 +01:00
Nick Craig-Wood
6c10024420 rclonetest: add --subdir flag for testing with a sub directory
Also add a test script for testing all the remotes
2014-07-19 13:07:56 +01:00
Nick Craig-Wood
e559194fb2 fs: Verify sizes are the same after transfer in Copy() 2014-07-19 13:05:07 +01:00
Nick Craig-Wood
1c472348b6 s3: Read metadata after Update or Put 2014-07-19 13:05:07 +01:00
Nick Craig-Wood
5a8bce6353 swift: Read metadata after Update or Put 2014-07-19 13:05:06 +01:00
Nick Craig-Wood
f9b31591f9 drive: Flush directory cache on Purge 2014-07-19 13:05:06 +01:00
Nick Craig-Wood
1527e64ee7 local: Implement Purger interface 2014-07-19 13:05:01 +01:00
Nick Craig-Wood
f7652db4f1 local: Make sure info is never nil 2014-07-19 11:50:11 +01:00
Nick Craig-Wood
8b75fb14c5 local: calculate md5sum on Read or Update since we check it in Copy() 2014-07-19 11:06:25 +01:00
Nick Craig-Wood
07f9a1a9f0 Update docs for Dropbox and Google Cloud Storage 2014-07-18 10:10:08 +01:00
Nick Craig-Wood
7d8bac2711 google cloud storage: fix merge conflict
Conflicts:
	rclone.go
	rclonetest/rclonetest.go
2014-07-16 12:21:01 +01:00
Nick Craig-Wood
cad9479a00 google cloud storage: Update metadata on Put since we get it back 2014-07-16 12:12:36 +01:00
Nick Craig-Wood
dfc8a375f6 dropbox: Switch to using RFC3339 for time metadata 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
7c9bdb4b7a dropbox: make limited fs work (copy single file) 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
f8bb0d9cc8 dropbox: remove metadata when we remove files 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
b185e104ed dropbox: Fix mkdir on already created directory 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
e57a4c7c0c dropbox: open the datastore in the background 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
d2f187e1a1 dropbox: Use /delta to list objects - much quicker
Also fix major performance problem - re-reading entry each time!
2014-07-15 19:27:42 +01:00
Nick Craig-Wood
c9aca33030 dropbox: Fix concurrent access to Dropbox datastore and Lower case keys in datastore 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
2b0911531c dropbox: basics of metadata in Dropbox datastore working 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
2149185fc2 dropbox: Initial support of full Fs interface
Still missing metadata support (eg SetModTime)
2014-07-15 19:27:42 +01:00
Nick Craig-Wood
0159da9f37 dropbox: graphics for the Dropbox app (used in auth process) 2014-07-15 19:27:41 +01:00
Nick Craig-Wood
680283d69f google cloud storage: fix download of files in sub directories 2014-07-15 19:27:31 +01:00
Nick Craig-Wood
c71f339e01 google cloud storage: implement ACLs and delete 2014-07-15 19:27:31 +01:00
Nick Craig-Wood
c91c96565f google cloud storage: set the Content-Type from the file name 2014-07-15 19:27:31 +01:00
Nick Craig-Wood
b72fc69fbe google cloud storage: Make operations on single files work 2014-07-15 19:27:30 +01:00
Nick Craig-Wood
a1732c21d8 google cloud storage: Initial support for full Fs interface 2014-07-15 19:27:30 +01:00
Nick Craig-Wood
b83441081c drive: factor common authentication code into googleauth module
In preparation for Google Cloud Storage support
2014-07-15 19:27:30 +01:00
Nick Craig-Wood
8a76568ea8 core: Verify MD5 sums after each transfer 2014-07-15 19:27:05 +01:00
Nick Craig-Wood
c4dc9d273a rclonetest: check sub directory and downloads 2014-07-15 13:28:48 +01:00
Nick Craig-Wood
66cf2df780 drive: check errors in Open() better 2014-07-15 13:28:35 +01:00
Nick Craig-Wood
c1a245d1c8 Factor UserAgent to fs and move Version to fs 2014-07-13 19:19:58 +01:00
Nick Craig-Wood
e40b09fe61 drive: Fix comment 2014-07-13 10:54:35 +01:00
Nick Craig-Wood
eb2b4ea8aa rclone: Don't purge if --dry-run set 2014-07-13 10:54:30 +01:00
Nick Craig-Wood
e055ed0489 rclone: change "ls" and add "lsl" and "md5sum" commands
Changed "ls" command not to show modification time by default only
size and path.  That is because it is slow for nearly all the remotes
as it requires extra metadata lookup.  All remotes can look up files
and sizes without extra operations.

Added "lsl" which does what "ls" used to - namely show modification
time, size and path.

Added "md5sum" which produces the same output as the md5sum command -
md5sums and paths, that is.
2014-07-12 12:09:20 +01:00
Nick Craig-Wood
dd6d7cad3a Notes about storage systems 2014-07-09 20:50:08 +01:00
Nick Craig-Wood
37b2274e10 Version 1.01 2014-07-04 21:15:27 +01:00
Nick Craig-Wood
91cfbd4146 drive: fix transfer of big files using up lots of memory - fixes #5
This was done by making a seekWrapper which wraps an io.Reader with a
basic Seek for code.google.com/p/google-api-go-client/googleapi to
detect the length.  Without this the getReaderSize function reads the
entire file into memory to find its length.
2014-07-04 17:17:21 +01:00
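A sketch of that idea: wrap an io.Reader of known size so the length probe (a Seek to the end, then back to the start, before any Read) succeeds without buffering. The io.Seek* names are the modern spelling; the 2014 code used the os.SEEK_* constants.

```
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// seekWrapper answers only the two Seek calls the length probe makes.
type seekWrapper struct {
	in   io.Reader
	size int64
}

func (s *seekWrapper) Read(p []byte) (int, error) { return s.in.Read(p) }

func (s *seekWrapper) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
		return 0, nil // rewind after the probe
	case io.SeekEnd:
		return s.size, nil // report the known size
	}
	return 0, errors.New("seekWrapper: unsupported seek")
}

func main() {
	r := &seekWrapper{in: strings.NewReader("hello"), size: 5}
	end, _ := r.Seek(0, io.SeekEnd)
	start, _ := r.Seek(0, io.SeekStart)
	fmt.Println(end, start) // 5 0
}
```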
Nick Craig-Wood
d4817399ff Fix release procedure 2014-07-03 22:01:25 +01:00
Nick Craig-Wood
48d259da68 Version 1.00 2014-07-03 21:56:54 +01:00
Nick Craig-Wood
f86fa6a062 Fix make tag 2014-07-03 21:43:14 +01:00
Nick Craig-Wood
93cb0a47e4 drive: fix whole second dates - fixes #4 2014-07-03 21:32:01 +01:00
Nick Craig-Wood
a12760c038 Travis build correction - fixes #2 2014-06-26 19:05:48 +01:00
Nick Craig-Wood
fdcd6a3a4c Add travis build file 2014-06-26 18:23:54 +01:00
Nick Craig-Wood
cb7891f4ff Fix retag 2014-06-26 17:57:32 +01:00
61 changed files with 5417 additions and 1161 deletions

.gitignore (5 changes)

@@ -4,6 +4,7 @@ rclone
rclonetest/rclonetest
build
docs/public
README.html
README.txt
MANUAL.md
MANUAL.html
MANUAL.txt
rclone.1

.travis.yml (new file, 12 lines)

@@ -0,0 +1,12 @@
language: go
go:
- 1.1.2
- 1.2.2
- 1.3.3
- 1.4
- tip
script:
- go get ./...
- go test -v ./...

Makefile

@@ -1,40 +1,47 @@
TAG := $(shell git describe --tags)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = "v" . $$_')
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
rclone: *.go */*.go
rclone:
@go version
go build
go install -v ./...
doc: rclone.1 README.html README.txt
test: rclone
go test ./...
cd fs && ./test_all.sh
rclone.1: README.md
pandoc -s --from markdown --to man README.md -o rclone.1
doc: rclone.1 MANUAL.html MANUAL.txt
README.html: README.md
pandoc -s --from markdown_github --to html README.md -o README.html
rclone.1: MANUAL.md
pandoc -s --from markdown --to man MANUAL.md -o rclone.1
README.txt: README.md
pandoc -s --from markdown_github --to plain README.md -o README.txt
MANUAL.md: make_manual.py docs/content/*.md
./make_manual.py
MANUAL.html: MANUAL.md
pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
install: rclone
install -d ${DESTDIR}/usr/bin
install -t ${DESTDIR}/usr/bin rclone
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
clean:
go clean ./...
find . -name \*~ | xargs -r rm -f
rm -rf build docs/public
rm -f rclone rclonetest/rclonetest rclone.1 README.html README.txt
rm -f rclone rclonetest/rclonetest rclone.1 MANUAL.md MANUAL.html MANUAL.txt
website:
cd docs && hugo
upload_website: website
./rclone -v sync docs/public memstore:www-rclone-org
rclone -v sync docs/public memstore:www-rclone-org
upload:
./rclone -v copy build/ memstore:downloads-rclone-org
rclone -v copy build/ memstore:downloads-rclone-org
cross: doc
./cross-compile $(TAG)
@@ -45,17 +52,18 @@ serve:
tag:
@echo "Old tag is $(LAST_TAG)"
@echo "New tag is $(NEW_TAG)"
echo -e "package main\n const Version = \"$(NEW_TAG)\"\n" | gofmt > version.go
cp -av version.go rclonetest/version.go
echo -e "package fs\n const Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
git tag $(NEW_TAG)
@echo "Add this to changelog in README.md"
@echo " * $(NEW_TAG) - " `date -I`
@echo "Add this to changelog in docs/content/changelog.md"
@echo " * $(NEW_TAG) -" `date -I`
@git log $(LAST_TAG)..$(NEW_TAG) --oneline
@echo "Then commit the changes"
@echo git commit -m "Version $(NEW_TAG)" -a -v
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo "And finally run make retag before make cross etc"
retag:
echo git tag -f $(LAST_TAG)
git tag -f $(LAST_TAG)
gen_tests:
cd fstest/fstests && go run gen_tests.go

README.md (251 changes)

@@ -1,17 +1,19 @@
% rclone(1) User Manual
% Nick Craig-Wood
% Apr 24, 2014
Rclone
======
[![Logo](http://rclone.org/img/rclone-120x120.png)](http://rclone.org/)
[Website](http://rclone.org) |
[Documentation](http://rclone.org/docs/) |
[Installation](http://rclone.org/install/) |
[G+](https://google.com/+RcloneOrg)
[![Build Status](https://travis-ci.org/ncw/rclone.png)](https://travis-ci.org/ncw/rclone) [![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
Rclone is a command line program to sync files and directories to and from
* Google Drive
* Amazon S3
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* The local filesystem
Features
@@ -24,244 +26,13 @@ Features
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts
See the Home page for more documentation and configuration walkthroughs.
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
* http://rclone.org/
Install
-------
Rclone is a Go program and comes as a single binary file.
Download the binary for your OS from
* http://rclone.org/downloads/
Or alternatively if you have Go installed use
go install github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`.
Configure
---------
First you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file
`.rclone.conf` in your home directory by default. (You can use the
`--config` option to choose a different config file.)
The easiest way to make the config is to run rclone with the config
option, Eg
rclone config
Usage
-----
Rclone syncs a directory tree from local to remote.
Its basic syntax is
Syntax: [options] subcommand <parameters> <parameters...>
See below for how to specify the source and destination paths.
Subcommands
-----------
rclone copy source:path dest:path
Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.
rclone sync source:path dest:path
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the `--dry-run` flag.
rclone ls [remote:path]
List all the objects in the path.
rclone lsd [remote:path]
List all directories/objects/buckets in the path.
rclone mkdir remote:path
Make the path if it doesn't already exist
rclone rmdir remote:path
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.
rclone purge remote:path
Remove the path and all of its contents.
rclone check source:path dest:path
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
General options:
```
--checkers=8: Number of checkers to run in parallel.
--config="~/.rclone.conf": Config file.
-n, --dry-run=false: Do a trial run with no permanent changes
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats
--transfers=4: Number of file transfers to run in parallel.
-v, --verbose=false: Print lots more stuff
```
Developer options:
```
--cpuprofile="": Write cpu profile to file
```
Local Filesystem
----------------
Paths are specified as normal filesystem paths, so
rclone sync /home/source /tmp/destination
Will sync `/home/source` to `/tmp/destination`
Swift / Rackspace cloudfiles / Memset Memstore
----------------------------------------------
Paths are specified as remote:container (or remote: for the `lsd`
command.) You may put subdirectories in too, eg
`remote:container/path/to/dir`.
So to copy a local directory to a swift container called backup:
rclone sync /home/source swift:backup
The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point since the epoch.
This is a de facto standard (used in the official python-swiftclient
amongst others) for storing the modification time (as read using
os.Stat) for an object.
Amazon S3
---------
Paths are specified as remote:bucket. You may put subdirectories in
too, eg `remote:bucket/path/to/dir`.
So to copy a local directory to a s3 container called backup
rclone sync /home/source s3:backup
The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch.
Google drive
------------
Paths are specified as drive:path.  Drive paths may be as deep as required.
The initial setup for drive involves getting a token from Google drive
which you need to do in your browser. `rclone config` walks you
through it.
To copy a local directory to a drive directory called backup
rclone copy /home/source drv:backup
Google drive stores modification times accurate to 1 ms.
Single file copies
------------------
Rclone can copy single files
rclone src:path/to/file dst:path/dir
Or
rclone src:path/to/file dst:path/to/file
Note that you can't rename the file if you are copying from one file to another.
License
-------
This is free software under the terms of the MIT license (check the
COPYING file included in this package).
Bugs
----
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work
Changelog
---------
* v0.99 - 2014-06-26
* Fix --dry-run not working
* Make compatible with go 1.1
* v0.98 - 2014-05-30
* s3: Treat missing Content-Length as 0 for some ceph installations
* rclonetest: add file with a space in
* v0.97 - 2014-05-05
* Implement copying of single files
* s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
* drive: Fix multiple files of same name being created
* drive: Use o.Update and fs.Put to optimise transfers
* Add version number, -V and --version
* v0.95 - 2014-03-28
* rclone.org: website, docs and graphics
* drive: fix path parsing
* v0.94 - 2014-03-27
* Change remote format one last time
* GNU style flags
* v0.93 - 2014-03-16
* drive: store token in config file
* cross compile other versions
* set strict permissions on config file
* v0.92 - 2014-03-15
* Config fixes and --config option
* v0.91 - 2014-03-15
* Make config file
* v0.90 - 2013-06-27
* Project named rclone
* v0.00 - 2012-11-18
* Project started
Contact and support
-------------------
The project website is at:
* https://github.com/ncw/rclone
There you can file bug reports, ask for help or send pull requests.
Authors
-------
* Nick Craig-Wood <nick@craig-wood.com>
Contributors
------------
* Your name goes here!

RELEASE.md (new file, 19 lines)

@@ -0,0 +1,19 @@
Required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* [gox](https://github.com/mitchellh/gox) for cross compiling
* Run `gox -build-toolchain`
* This assumes you have your own source checkout
* pandoc for making the html and man pages
Making a release
* go get -u -f -v ./...
* make test
* make tag
* edit docs/content/changelog.md
* git commit -a -v
* make retag
* # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross
* make cross
* make upload
* make upload_website
* git push --tags origin master

cross-compile (script)

@@ -21,8 +21,8 @@ mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
cd build
for d in `ls`; do
cp -a ../README.txt $d/
cp -a ../README.html $d/
cp -a ../MANUAL.txt $d/README.txt
cp -a ../MANUAL.html $d/README.html
cp -a ../rclone.1 $d/
zip -r9 $d.zip $d
rm -rf $d

docs/config.json

@@ -5,7 +5,7 @@
"menu": "menu"
},
"baseurl": "http://rclone.org",
"title": "rclone - rsync for object storage",
"description": "rclone - rsync for object storage: google drive, s3, swift, cloudfiles, memstore...",
"title": "rclone - rsync for cloud storage",
"description": "rclone - rsync for cloud storage: google drive, s3, swift, cloudfiles, dropbox, memstore...",
"canonifyurls": true
}

docs/content/about.md

@@ -1,8 +1,8 @@
---
title: "Rclone"
description: "rclone syncs files to and from Google Drive, S3, Swift and Cloudfiles."
description: "rclone syncs files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox and Google Cloud Storage."
type: page
date: "2014-04-26"
date: "2014-07-17"
groups: ["about"]
---
@@ -16,6 +16,8 @@ Rclone is a command line program to sync files and directories to and from
* Google Drive
* Amazon S3
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* The local filesystem
Features

docs/content/authors.md (new file, 15 lines)

@@ -0,0 +1,15 @@
---
title: "Authors"
description: "Rclone Authors and Contributors"
date: "2014-06-16"
---
Authors
-------
* Nick Craig-Wood <nick@craig-wood.com>
Contributors
------------
* Alex Couper <amcouper@gmail.com>

docs/content/bugs.md (new file, 28 lines)

@@ -0,0 +1,28 @@
---
title: "Bugs"
description: "Rclone Bugs and Limitations"
date: "2014-06-16"
---
Bugs and Limitations
--------------------
### Empty directories are left behind / not created ###
With remotes that have a concept of directory, eg Local and Drive,
empty directories may be left behind, or not created when one was
expected.
This is because rclone doesn't have a concept of a directory - it only
works on objects. Most of the object storage systems can't actually
store a directory so there is nowhere for rclone to store anything
about directories.
You can work around this to some extent with the `purge` command which
will delete everything under the path, **including** empty directories.
### Directory timestamps aren't preserved ###
For the same reason as the above, rclone doesn't have a concept of a
directory - it only works on objects, therefore it can't preserve the
timestamps of directories.

docs/content/changelog.md (new file, 115 lines)

@@ -0,0 +1,115 @@
---
title: "Documentation"
description: "Rclone Changelog"
date: "2015-06-09"
---
Changelog
---------
* v1.16 - 2015-06-09
* Fix uploading big files which was causing timeouts or panics
* Don't check md5sum after download with --size-only
* v1.15 - 2015-06-06
* Add --checksum flag to only discard transfers by MD5SUM - thanks Alex Couper
* Implement --size-only flag to sync on size not checksum & modtime
* Expand docs and remove duplicated information
* Document rclone's limitations with directories
* dropbox: update docs about case insensitivity
* v1.14 - 2015-05-21
* local: fix encoding of non utf-8 file names - fixes a duplicate file problem
* drive: docs about rate limiting
* google cloud storage: Fix compile after API change in "google.golang.org/api/storage/v1"
* v1.13 - 2015-05-10
* Revise documentation (especially sync)
* Implement --timeout and --conntimeout
* s3: ignore etags from multipart uploads which aren't md5sums
* v1.12 - 2015-03-15
* drive: Use chunked upload for files above a certain size
* drive: add --drive-chunk-size and --drive-upload-cutoff parameters
* drive: switch to insert from update when a failed copy deletes the upload
* core: Log duplicate files if they are detected
* v1.11 - 2015-03-04
* swift: add region parameter
* drive: fix crash on failed to update remote mtime
* In remote paths, change native directory separators to /
* Add synchronization to ls/lsl/lsd output to stop corruptions
* Ensure all stats/log messages to go stderr
* Add --log-file flag to log everything (including panics) to file
* Make it possible to disable stats printing with --stats=0
* Implement --bwlimit to limit data transfer bandwidth
* v1.10 - 2015-02-12
* s3: list an unlimited number of items
* Fix getting stuck in the configurator
* v1.09 - 2015-02-07
* windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
* local: Fix directory separators on Windows
* drive: fix rate limit exceeded errors
* v1.08 - 2015-02-04
* drive: fix subdirectory listing to not list entire drive
* drive: Fix SetModTime
* dropbox: adapt code to recent library changes
* v1.07 - 2014-12-23
* google cloud storage: fix memory leak
* v1.06 - 2014-12-12
* Fix "Couldn't find home directory" on OSX
* swift: Add tenant parameter
* Use new location of Google API packages
* v1.05 - 2014-08-09
* Improved tests and consequently lots of minor fixes
* core: Fix race detected by go race detector
* core: Fixes after running errcheck
* drive: reset root directory on Rmdir and Purge
* fs: Document that Purger returns error on empty directory, test and fix
* google cloud storage: fix ListDir on subdirectory
* google cloud storage: re-read metadata in SetModTime
* s3: make reading metadata more reliable to work around eventual consistency problems
* s3: strip trailing / from ListDir()
* swift: return directories without / in ListDir
* v1.04 - 2014-07-21
* google cloud storage: Fix crash on Update
* v1.03 - 2014-07-20
* swift, s3, dropbox: fix updated files being marked as corrupted
* Make compile with go 1.1 again
* v1.02 - 2014-07-19
* Implement Dropbox remote
* Implement Google Cloud Storage remote
* Verify Md5sums and Sizes after copies
* Remove times from "ls" command - lists sizes only
* Add add "lsl" - lists times and sizes
* Add "md5sum" command
* v1.01 - 2014-07-04
* drive: fix transfer of big files using up lots of memory
* v1.00 - 2014-07-03
* drive: fix whole second dates
* v0.99 - 2014-06-26
* Fix --dry-run not working
* Make compatible with go 1.1
* v0.98 - 2014-05-30
* s3: Treat missing Content-Length as 0 for some ceph installations
* rclonetest: add file with a space in
* v0.97 - 2014-05-05
* Implement copying of single files
* s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
* drive: Fix multiple files of same name being created
* drive: Use o.Update and fs.Put to optimise transfers
* Add version number, -V and --version
* v0.95 - 2014-03-28
* rclone.org: website, docs and graphics
* drive: fix path parsing
* v0.94 - 2014-03-27
* Change remote format one last time
* GNU style flags
* v0.93 - 2014-03-16
* drive: store token in config file
* cross compile other versions
* set strict permissions on config file
* v0.92 - 2014-03-15
* Config fixes and --config option
* v0.91 - 2014-03-15
* Make config file
* v0.90 - 2013-06-27
* Project named rclone
* v0.00 - 2012-11-18
* Project started

docs/content/contact.md

@@ -5,8 +5,17 @@ date: "2014-04-26"
---
Contact the rclone project
--------------------------
The project website is at:
* https://github.com/ncw/rclone
There you can file bug reports, ask for help or contribute pull
requests.
See also
* [Github project page for source, reporting bugs and pull requests](http://github.com/ncw/rclone)
* <a href="https://google.com/+RcloneOrg" rel="publisher">Google+ page for general comments</a>
Or email [Nick Craig-Wood](mailto:nick@craig-wood.com)

docs/content/docs.md

@@ -1,22 +1,9 @@
---
title: "Documentation"
description: "Rclone Documentation"
date: "2014-04-26"
description: "Rclone Usage"
date: "2015-06-06"
---
Install
-------
Rclone is a Go program and comes as a single binary file.
[Download](/downloads/) the relevant binary.
Or alternatively if you have Go installed use
go get github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`.
Configure
---------
@@ -30,11 +17,13 @@ option:
rclone config
See below for detailed instructions for
See the following for detailed instructions for
* [Google drive](/drive/)
* [Amazon S3](/s3/)
* [Swift / Rackspace Cloudfiles / Memset Memstore](/swift/)
* [Dropbox](/dropbox/)
* [Google Cloud Storage](/googlecloudstorage/)
* [Local filesystem](/local/)
Usage
@@ -55,96 +44,216 @@ You can define as many storage paths as you like in the config file.
Subcommands
-----------
rclone copy source:path dest:path
### rclone copy source:path dest:path ###
Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.
rclone sync source:path dest:path
### rclone sync source:path dest:path ###
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the -dry-run flag.
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the `--dry-run` flag.
rclone ls [remote:path]
### rclone ls [remote:path] ###
List all the objects in the path.
List all the objects in the path with size and path.
rclone lsd [remote:path]
### rclone lsd [remote:path] ###
List all directoryes/objects/buckets in the path.
List all directories/containers/buckets in the path.
rclone mkdir remote:path
### rclone lsl [remote:path] ###
List all the objects in the path with modification time,
size and path.
### rclone md5sum [remote:path] ###
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.
### rclone mkdir remote:path ###
Make the path if it doesn't already exist
rclone rmdir remote:path
### rclone rmdir remote:path ###
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.
rclone purge remote:path
### rclone purge remote:path ###
Remove the path and all of its contents.
rclone check source:path dest:path
### rclone check source:path dest:path ###
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
General options:
### rclone config ###
```
--checkers=8: Number of checkers to run in parallel.
--transfers=4: Number of file transfers to run in parallel.
--config="~/.rclone.conf": Config file.
-n, --dry-run=false: Do a trial run with no permanent changes
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats
-v, --verbose=false: Print lots more stuff
```
Enter an interactive configuration session.
Developer options:
### rclone help ###
```
--cpuprofile="": Write cpu profile to file
```
Prints help on rclone commands and options.
License
Options
-------
This is free software under the terms of the MIT license (check the
COPYING file included in this package).
Rclone has a number of options to control its behaviour.
Bugs
----
Options which use TIME use the go time parser. A duration string is a
possibly signed sequence of decimal numbers, each with optional
fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid
time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
* Doesn't sync individual files yet, only directories.
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work
Options which use SIZE use kByte by default. However a suffix of `k`
for kBytes, `M` for MBytes and `G` for GBytes may be used. These are
the binary units, eg 2**10, 2**20, 2**30 respectively.
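A sketch of parsing both kinds of flag: time.ParseDuration is the go parser the TIME options use, while parseSize is a hypothetical helper, not rclone's actual code.

```
package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseSize reads a SIZE flag: a bare number means kBytes, and the
// binary suffixes k, M and G select 2**10, 2**20 and 2**30 bytes.
func parseSize(s string) (int64, error) {
	mult := int64(1 << 10) // default unit is kBytes
	if n := len(s); n > 0 {
		switch s[n-1] {
		case 'k', 'K':
			mult, s = 1<<10, s[:n-1]
		case 'M':
			mult, s = 1<<20, s[:n-1]
		case 'G':
			mult, s = 1<<30, s[:n-1]
		}
	}
	v, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, err
	}
	return int64(v * float64(mult)), nil
}

func main() {
	d, _ := time.ParseDuration("2h45m") // TIME flag value
	n, _ := parseSize("10M")            // SIZE flag value
	fmt.Println(d, n)                   // 2h45m0s 10485760
}
```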
Contact and support
-------------------
### --bwlimit=SIZE ###
The project website is at:
Bandwidth limit in kBytes/s, or use suffix k|M|G. The default is `0`
which means to not limit bandwidth.
* https://github.com/ncw/rclone
For example to limit bandwidth usage to 10 MBytes/s use `--bwlimit 10M`
There you can file bug reports, ask for help or contribute patches.
This only limits the bandwidth of the data transfer; it doesn't limit
the bandwidth of the directory listings etc.
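One way to implement such a limit is to wrap the transfer's io.Reader so the average rate never exceeds the limit. A minimal sketch, not rclone's actual implementation:

```
package main

import (
	"io"
	"strings"
	"time"
)

// throttledReader delays Reads so that total bytes / elapsed time
// stays at or below limit bytes per second.
type throttledReader struct {
	in    io.Reader
	limit float64 // bytes per second
	start time.Time
	total int64
}

func newThrottledReader(in io.Reader, limit float64) *throttledReader {
	return &throttledReader{in: in, limit: limit, start: time.Now()}
}

func (t *throttledReader) Read(p []byte) (int, error) {
	n, err := t.in.Read(p)
	t.total += int64(n)
	// how long the bytes so far should have taken at the limit
	want := time.Duration(float64(t.total) / t.limit * float64(time.Second))
	if elapsed := time.Since(t.start); want > elapsed {
		time.Sleep(want - elapsed)
	}
	return n, err
}

func main() {
	src := strings.NewReader(strings.Repeat("x", 4096))
	io.Copy(io.Discard, newThrottledReader(src, 10*1024*1024)) // ~10 MBytes/s
}
```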
Authors
-------
### --checkers=N ###
* Nick Craig-Wood <nick@craig-wood.com>
The number of checkers to run in parallel. Checkers do the equality
checking of files during a sync. For some storage systems (eg s3,
swift, dropbox) this can take a significant amount of time so they are
run in parallel.
Contributors
------------
The default is to run 8 checkers in parallel.
* Your name goes here!
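A sketch of the checker parallelism described above, with a hypothetical checkOne function standing in for the real per-file comparison:

```
package main

import (
	"fmt"
	"sync"
)

type pair struct{ src, dst string }

// checkOne stands in for the real equality check (size/modtime or MD5SUM).
func checkOne(p pair) { fmt.Println("checked", p.src, "against", p.dst) }

// runCheckers drains the pairs channel with n parallel checkers.
func runCheckers(pairs <-chan pair, n int) {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for p := range pairs {
				checkOne(p)
			}
		}()
	}
	wg.Wait()
}

func main() {
	pairs := make(chan pair)
	go func() {
		pairs <- pair{"a.txt", "remote:a.txt"}
		pairs <- pair{"b.txt", "remote:b.txt"}
		close(pairs)
	}()
	runCheckers(pairs, 8) // the default of 8 checkers
}
```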
### -c, --checksum ###
Normally rclone will look at modification time and size of files to
see if they are equal. If you set this flag then rclone will check
MD5SUM and size to determine if files are equal.
This is very useful when transferring between remotes which store the
MD5SUM on the object which include swift, s3, drive, and google cloud
storage.
Eg `rclone --checksum sync s3:/bucket swift:/bucket` would run much
quicker than without the `--checksum` flag.
When using this flag, rclone won't update mtimes of remote files if
they are incorrect as it would normally.
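Putting the modes together, a sketch of the resulting decision, with a hypothetical Object interface standing in for rclone's own types: size is compared first, then either modification time (within --modify-window) or MD5SUM when --checksum is set.

```
package main

import (
	"fmt"
	"time"
)

// Object is a stand-in for a remote file's metadata.
type Object interface {
	Size() int64
	ModTime() time.Time
	Md5sum() string
}

// equal reports whether src and dst need no transfer.
func equal(src, dst Object, checksum bool, window time.Duration) bool {
	if src.Size() != dst.Size() {
		return false // sizes differ: always transfer
	}
	if checksum {
		return src.Md5sum() == dst.Md5sum()
	}
	dt := src.ModTime().Sub(dst.ModTime())
	if dt < 0 {
		dt = -dt
	}
	return dt <= window // within --modify-window counts as unchanged
}

type obj struct {
	size int64
	mod  time.Time
	md5  string
}

func (o obj) Size() int64        { return o.size }
func (o obj) ModTime() time.Time { return o.mod }
func (o obj) Md5sum() string     { return o.md5 }

func main() {
	now := time.Now()
	a := obj{5, now, "abc"}
	b := obj{5, now.Add(time.Millisecond), "abc"}
	fmt.Println(equal(a, b, false, time.Second)) // true: modtimes within window
	fmt.Println(equal(a, b, true, 0))            // true: md5sums match
}
```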
### --config=CONFIG_FILE ###
Specify the location of the rclone config file. Normally this is in
your home directory as a file called `.rclone.conf`. If you run
`rclone -h` and look at the help for the `--config` option you will
see where the default location is for you. Use this flag to override
the config location, eg `rclone --config=".myconfig" .config`.
### --contimeout=TIME ###
Set the connection timeout. This should be in go time format which
looks like `5s` for 5 seconds, `10m` for 10 minutes, or `3h30m`.
The connection timeout is the amount of time rclone will wait for a
connection to go through to a remote object storage system. It is
`1m` by default.
### -n, --dry-run ###
Do a trial run with no permanent changes. Use this in combination
with the `-v` flag to see what rclone would do without actually doing
it. Useful when setting up the `sync` command.
### --log-file=FILE ###
Log all of rclone's output to FILE. This is not active by default.
This can be useful for tracking down problems with syncs in
combination with the `-v` flag.
### --modify-window=TIME ###
When checking whether a file has been modified, this is the maximum
allowed time difference that a file can have and still be considered
equivalent.
The default is `1ns` unless this is overridden by a remote. For
example OS X only stores modification times to the nearest second so
if you are reading and writing to an OS X filing system this will be
`1s` by default.
This command line flag allows you to override that computed default.
### -q, --quiet ###
Normally rclone outputs stats and a completion message. If you set
this flag it will make as little output as possible.
### --size-only ###
Normally rclone will look at modification time and size of files to
see if they are equal. If you set this flag then rclone will check
only the size.
This can be useful when transferring files from dropbox which have been
modified by the desktop sync client, which doesn't set checksums or
modification times in the same way as rclone.
When using this flag, rclone won't update mtimes of remote files if
they are incorrect as it would normally.
### --stats=TIME ###
Rclone will print stats at regular intervals to show its progress.
This sets the interval.
The default is `1m`. Use 0 to disable.
### --timeout=TIME ###
This sets the IO idle timeout. If a transfer has started but then
becomes idle for this long it is considered broken and disconnected.
The default is `5m`. Set to 0 to disable.
### --transfers=N ###
The number of file transfers to run in parallel. It can sometimes be
useful to set this to a smaller number if the remote is giving a lot
of timeouts or bigger if you have lots of bandwidth and a fast remote.
The default is to run 4 file transfers in parallel.
### -v, --verbose ###
If you set this flag, rclone will become very verbose telling you
about every file it considers and transfers.
Very useful for debugging.
### -V, --version ###
Prints the version number
Developer options
-----------------
These options are useful when developing or debugging rclone. There
are also some more remote specific options which aren't documented
here which are used for testing. These start with remote name eg
`--drive-test-option`.
### --cpuprofile=FILE ###
Write cpu profile to file. This can be analysed with `go tool pprof`.

docs/content/downloads.md

@@ -2,34 +2,34 @@
title: "Rclone downloads"
description: "Download rclone binaries for your OS."
type: page
date: "2014-06-26"
date: "2015-06-09"
---
Rclone Download v0.99
Rclone Download v1.16
=====================
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.99-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.99-windows-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.16-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.16-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.99-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.99-osx-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.16-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.16-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.99-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.99-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v0.99-linux-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.16-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.16-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.16-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.99-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.99-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v0.99-freebsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.16-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.16-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.16-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.99-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.99-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v0.99-netbsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.16-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.16-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.16-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.99-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.99-openbsd-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.16-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.16-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.99-plan9-386.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.16-plan9-386.zip)
Older downloads can be found [here](http://downloads.rclone.org/)

docs/content/drive.md

@@ -1,13 +1,15 @@
---
title: "Google drive"
description: "Rclone docs for Google drive"
date: "2014-04-26"
date: "2015-05-10"
---
<i class="fa fa-google"></i> Google Drive
-----------------------------------------
Paths are specified as `drive:path`
Drive paths may be as deep as required, eg
`drive:directory/subdirectory`.
Drive paths may be as deep as required, eg `drive:directory/subdirectory`.
The initial setup for drive involves getting a token from Google drive
which you need to do in your browser. `rclone config` walks you
@@ -67,7 +69,25 @@ To copy a local directory to a drive directory called backup
rclone copy /home/source remote:backup
Modified time
-------------
### Modified time ###
Google drive stores modification times accurate to 1 ms.
### Revisions ###
Google drive stores revisions of files. When you upload a change to
an existing file to google drive using rclone it will create a new
revision of that file.
Revisions follow the standard google policy which at time of writing
was
* They are deleted after 30 days or 100 revisions (whatever comes first).
* They do not count towards a user storage quota.
### Limitations ###
Drive has quite a lot of rate limiting. This causes rclone to be
limited to transferring about 2 files per second only. Individual
files may be transferred much faster at 100s of MBytes/s but lots of
small files can take a long time.

docs/content/dropbox.md (new file, 91 lines)

@@ -0,0 +1,91 @@
---
title: "Dropbox"
description: "Rclone docs for Dropbox"
date: "2014-07-17"
---
<i class="fa fa-dropbox"></i> Dropbox
---------------------------------
Paths are specified as `remote:path`
Dropbox paths may be as deep as required, eg
`remote:directory/subdirectory`.
The initial setup for dropbox involves getting a token from Dropbox
which you need to do in your browser. `rclone config` walks you
through it.
Here is an example of how to make a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) google cloud storage
5) dropbox
6) drive
type> 5
Dropbox App Key - leave blank to use rclone's.
app_key>
Dropbox App Secret - leave blank to use rclone's.
app_secret>
Remote config
Please visit:
https://www.dropbox.com/1/oauth2/authorize?client_id=XXXXXXXXXXXXXXX&response_type=code
Enter the code: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXXXXXXXX
--------------------
[remote]
app_key =
app_secret =
token = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
You can then use it like this,
List directories in top level of your dropbox
rclone lsd remote:
List all the files in your dropbox
rclone ls remote:
To copy a local directory to a dropbox directory called backup
rclone copy /home/source remote:backup
### Modified time ###
Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone".
### Limitations ###
Dropbox datastores are limited to 100,000 rows so this is the maximum
number of files rclone can manage on Dropbox.
Dropbox is case insensitive which can sometimes cause duplicated files.
If you use the desktop sync tool and rclone on the same files then the
md5sums and modification times may get out of sync as far as rclone is
concerned. This will cause a `Corrupted on transfer: md5sums differ`
error message when fetching files. You can work around this by using
the `--size-only` flag to ignore the md5sums and modification times
for these files.

docs/content/googlecloudstorage.md (new file)

@@ -0,0 +1,116 @@
---
title: "Google Cloud Storage"
description: "Rclone docs for Google Cloud Storage"
date: "2014-07-17"
---
<i class="fa fa-google"></i> Google Cloud Storage
-------------------------------------------------
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
command.) You may put subdirectories in too, eg `remote:bucket/path/to/dir`.
The initial setup for google cloud storage involves getting a token from Google Cloud Storage
which you need to do in your browser. `rclone config` walks you
through it.
Here is an example of how to make a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) google cloud storage
5) dropbox
6) drive
type> 4
Google Application Client Id - leave blank to use rclone's.
client_id>
Google Application Client Secret - leave blank to use rclone's.
client_secret>
Project number optional - needed only for list/create/delete buckets - see your developer console.
project_number> 12345678
Access Control List for new objects.
Choose a number from below, or type in your own value
* Object owner gets OWNER access, and all Authenticated Users get READER access.
1) authenticatedRead
* Object owner gets OWNER access, and project team owners get OWNER access.
2) bucketOwnerFullControl
* Object owner gets OWNER access, and project team owners get READER access.
3) bucketOwnerRead
* Object owner gets OWNER access [default if left blank].
4) private
* Object owner gets OWNER access, and project team members get access according to their roles.
5) projectPrivate
* Object owner gets OWNER access, and all Users get READER access.
6) publicRead
object_acl> 4
Access Control List for new buckets.
Choose a number from below, or type in your own value
* Project team owners get OWNER access, and all Authenticated Users get READER access.
1) authenticatedRead
* Project team owners get OWNER access [default if left blank].
2) private
* Project team members get access according to their roles.
3) projectPrivate
* Project team owners get OWNER access, and all Users get READER access.
4) publicRead
* Project team owners get OWNER access, and all Users get WRITER access.
5) publicReadWrite
bucket_acl> 2
Remote config
Go to the following link in your browser
https://accounts.google.com/o/oauth2/auth?access_type=&approval_prompt=&client_id=XXXXXXXXXXXX.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&response_type=code&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdevstorage.full_control&state=state
Log in, then paste the token that is returned in the browser here
Enter verification code> x/xxxxxxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxxxxxxxxxx_xxxxxxxx
--------------------
[remote]
type = google cloud storage
client_id =
client_secret =
token = {"AccessToken":"xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx","Expiry":"2014-07-17T20:49:14.929208288+01:00","Extra":null}
project_number = 12345678
object_acl = private
bucket_acl = private
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
This remote is called `remote` and can now be used like this
See all the buckets in your project
rclone lsd remote:
Make a new bucket
rclone mkdir remote:bucket
List the contents of a bucket
rclone ls remote:bucket
Sync `/home/local/directory` to the remote bucket, deleting any excess
files in the bucket.
rclone sync /home/local/directory remote:bucket
### Modified time ###
Google cloud storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in
RFC3339 format accurate to 1ns.
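A sketch of reading and writing that key, assuming a plain map stands in for the object's metadata:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	meta := map[string]string{}

	// write: RFC3339 with ns precision under the "mtime" key
	meta["mtime"] = time.Now().Format(time.RFC3339Nano)

	// read it back
	t, err := time.Parse(time.RFC3339Nano, meta["mtime"])
	fmt.Println(t, err)
}
```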

docs/content/install.md (new file, 21 lines)

@@ -0,0 +1,21 @@
---
title: "Install"
description: "Rclone Installation"
date: "2014-07-17"
---
Install
-------
Rclone is a Go program and comes as a single binary file.
[Download](/downloads/) the relevant binary.
Or alternatively if you have Go installed use
go get github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`.
See the [Usage section](/usage/) of the docs for how to use rclone, or
run `rclone -h`.

docs/content/licence.md (new file, 34 lines)

@@ -0,0 +1,34 @@
---
title: "Licence"
description: "Rclone Licence"
date: "2015-06-06"
---
License
-------
This is free software under the terms of the MIT license (check the
COPYING file included with the source code).
```
Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
```

docs/content/local.md

@@ -4,8 +4,8 @@ description: "Rclone docs for the local filesystem"
date: "2014-04-26"
---
Local Filesystem
----------------
<i class="fa fa-file"></i> Local Filesystem
-------------------------------------------
Local paths are specified as normal filesystem paths, eg `/path/to/wherever`, so
@@ -16,10 +16,23 @@ Will sync `/home/source` to `/tmp/destination`
These can be configured into the config file for consistency's sake,
but it is probably easier not to.
Modified time
-------------
### Modified time ###
Rclone reads and writes the modified time using an accuracy determined by
the OS. Typically this is 1ns on Linux, 10 ns on Windows and 1 second
on OS X.
### Filenames ###
Filenames are expected to be encoded in UTF-8 on disk. This is the
normal case for Windows and OS X. There is a bit more uncertainty in
the Linux world, but new distributions will have UTF-8 encoded file
names.
If an invalid (non-UTF8) filename is read, the invalid characters will
be replaced with the unicode replacement character, '�'. `rclone`
will emit a debug message in this case (use `-v` to see), eg
```
Local file system at .: Replacing invalid UTF-8 characters in "gro\xdf"
```
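A sketch of that replacement using today's standard library (strings.ToValidUTF8 arrived in Go 1.13; the code this describes did the scan by hand):

```
package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

func main() {
	name := "gro\xdf" // invalid UTF-8: a Latin-1 encoded "groß"
	if !utf8.ValidString(name) {
		fixed := strings.ToValidUTF8(name, string(utf8.RuneError))
		fmt.Printf("Replacing invalid UTF-8 characters in %q -> %q\n", name, fixed)
	}
}
```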

docs/content/s3.md

@@ -4,13 +4,11 @@ description: "Rclone docs for Amazon S3"
date: "2014-04-26"
---
Paths are specified as `remote:bucket` or `remote:`
<i class="fa fa-archive"></i> Amazon S3
---------------------------------------
S3 paths can't refer to subdirectories within a bucket (yet).
So to copy a local directory to a s3 container called backup
rclone sync /home/source s3:backup
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
command.) You may put subdirectories in too, eg `remote:bucket/path/to/dir`.
Here is an example of making an s3 configuration. First run
@@ -102,8 +100,7 @@ files in the bucket.
rclone sync /home/local/directory remote:bucket
Modified time
-------------
### Modified time ###
The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch accurate to 1 ns.
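For example (illustrative only), a file last modified at Unix time
1433923562.123456789 would be uploaded with the metadata header
`X-Amz-Meta-Mtime: 1433923562.123456789`.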


@@ -4,13 +4,17 @@ description: "Swift"
date: "2014-04-26"
---
<i class="fa fa-space-shuttle"></i> Swift
----------------------------------------
Swift refers to [Openstack Object Storage](http://www.openstack.org/software/openstack-storage/).
Commercial implementations of that being:
* [Rackspace Cloud Files](http://www.rackspace.com/cloud/files/)
* [Memset Memstore](http://www.memset.com/cloud/storage/)
Paths are specified as `remote:container` or `remote:`
Paths are specified as `remote:container` (or `remote:` for the `lsd`
command). You may put subdirectories in too, eg `remote:container/path/to/dir`.
Here is an example of making a swift configuration. First run
@@ -48,12 +52,15 @@ Choose a number from below, or type in your own value
* Memset Memstore UK v2
5) https://auth.storage.memset.com/v2.0
auth> 1
Tenant name - optional
tenant>
Remote config
--------------------
[remote]
user = user_name
key = password_or_api_key
auth = https://auth.api.rackspacecloud.com/v1.0
tenant =
--------------------
y) Yes this is OK
e) Edit this remote
@@ -80,8 +87,7 @@ excess files in the container.
rclone sync /home/local/directory remote:container
Modified time
-------------
### Modified time ###
The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point since the epoch accurate to 1


@@ -12,17 +12,29 @@
<div class="collapse navbar-collapse navbar-ex1-collapse">
<ul class="nav navbar-nav">
<li><a href="/downloads/"><i class="fa fa-cloud-download"></i> Downloads</a></li>
<li><a href="/docs/"><i class="fa fa-book"></i> Docs</a></li>
<li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Docs</a>
<ul class="dropdown-menu">
<li><a href="/install/"><i class="fa fa-book"></i> Installation</a></li>
<li><a href="/docs/"><i class="fa fa-book"></i> Usage</a></li>
<li><a href="/changelog/"><i class="fa fa-book"></i> Changelog</a></li>
<li><a href="/bugs/"><i class="fa fa-book"></i> Bugs</a></li>
<li><a href="/licence/"><i class="fa fa-book"></i> Licence</a></li>
<li><a href="/authors/"><i class="fa fa-book"></i> Authors</a></li>
</ul>
</li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Storage Systems</a>
<ul class="dropdown-menu">
<li><a href="/drive/"><i class="fa fa-google"></i> Drive</a></li>
<li><a href="/s3/"><i class="fa fa-archive"></i> S3</a></li>
<li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Swift</a></li>
<li><a href="/dropbox/"><i class="fa fa-dropbox"></i> Dropbox</a></li>
<li><a href="/googlecloudstorage/"><i class="fa fa-google"></i> Google Cloud Storage</a></li>
<li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
</ul>
</li>
<li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
</ul>
</div>
</div>


@@ -1,14 +1,6 @@
// Drive interface
package drive
// Gets this quite often
// Failed to set mtime: googleapi: Error 403: Rate Limit Exceeded
// FIXME list containers equivalent should list directories?
// FIXME list directory should list to channel for concurrency not
// append to array
// FIXME need to deal with some corner cases
// * multiple files with the same name
// * files can be in multiple directories
@@ -16,20 +8,18 @@ package drive
// * files with / in name
import (
"encoding/json"
"fmt"
"io"
"log"
"mime"
"net/http"
"path"
"strings"
"sync"
"time"
"code.google.com/p/goauth2/oauth"
"code.google.com/p/google-api-go-client/drive/v2"
"google.golang.org/api/drive/v2"
"google.golang.org/api/googleapi"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/googleauth"
"github.com/ogier/pflag"
)
@@ -38,20 +28,37 @@ const (
rcloneClientId = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "X4Z3ca8xfWDb1Voo-F9a7ZxJ"
driveFolderType = "application/vnd.google-apps.folder"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// Globals
var (
// Flags
driveFullList = pflag.BoolP("drive-full-list", "", true, "Use a full listing for directory list. More data but usually quicker.")
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
chunkSize = fs.SizeSuffix(256 * 1024)
driveUploadCutoff = chunkSize
// Description of how to auth for this app
driveAuth = &googleauth.Auth{
Scope: "https://www.googleapis.com/auth/drive",
DefaultClientId: rcloneClientId,
DefaultClientSecret: rcloneClientSecret,
}
)
// Register with Fs
func init() {
fs.Register(&fs.FsInfo{
Name: "drive",
NewFs: NewFs,
Config: Config,
Name: "drive",
NewFs: NewFs,
Config: func(name string) {
driveAuth.Config(name)
},
Options: []fs.Option{{
Name: "client_id",
Help: "Google Application Client Id - leave blank to use rclone's.",
@@ -60,77 +67,8 @@ func init() {
Help: "Google Application Client Secret - leave blank to use rclone's.",
}},
})
}
// Configuration helper - called after the user has put in the defaults
func Config(name string) {
// See if already have a token
tokenString := fs.ConfigFile.MustValue(name, "token")
if tokenString != "" {
fmt.Printf("Already have a drive token - refresh?\n")
if !fs.Confirm() {
return
}
}
// Get a drive transport
t, err := newDriveTransport(name)
if err != nil {
log.Fatalf("Couldn't make drive transport: %v", err)
}
// Generate a URL for the user to visit for authorization.
authUrl := t.Config.AuthCodeURL("state")
fmt.Printf("Go to the following link in your browser\n")
fmt.Printf("%s\n", authUrl)
fmt.Printf("Log in, then type paste the token that is returned in the browser here\n")
// Read the code, and exchange it for a token.
fmt.Printf("Enter verification code> ")
authCode := fs.ReadLine()
_, err = t.Exchange(authCode)
if err != nil {
log.Fatalf("Failed to get token: %v", err)
}
}
// A token cache to save the token in the config file section named
type tokenCache string
// Get the token from the config file - returns an error if it isn't present
func (name tokenCache) Token() (*oauth.Token, error) {
tokenString, err := fs.ConfigFile.GetValue(string(name), "token")
if err != nil {
return nil, err
}
if tokenString == "" {
return nil, fmt.Errorf("Empty token found - please reconfigure")
}
token := new(oauth.Token)
err = json.Unmarshal([]byte(tokenString), token)
if err != nil {
return nil, err
}
return token, nil
}
// Save the token to the config file
//
// This saves the config file if it changes
func (name tokenCache) PutToken(token *oauth.Token) error {
tokenBytes, err := json.Marshal(token)
if err != nil {
return err
}
tokenString := string(tokenBytes)
old := fs.ConfigFile.MustValue(string(name), "token")
if tokenString != old {
fs.ConfigFile.SetValue(string(name), "token", tokenString)
fs.SaveConfig()
}
return nil
pflag.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
pflag.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must be a power of 2 >= 256k.")
}
// FsDrive represents a remote drive server
@@ -142,8 +80,10 @@ type FsDrive struct {
rootId string // Id of the root directory
foundRoot bool // Whether we have found the root or not
findRootLock sync.Mutex // Protect findRoot from concurrent use
dirCache dirCache // Map of directory path to directory id
dirCache *dirCache // Map of directory path to directory id
findDirLock sync.Mutex // Protect findDir from concurrent use
pacer chan struct{} // To pace the operations
sleepTime time.Duration // Time to sleep for each transaction
}
// FsObjectDrive describes a drive object
@@ -165,8 +105,8 @@ type dirCache struct {
}
// Make a new locked map
func newDirCache() dirCache {
d := dirCache{}
func newDirCache() *dirCache {
d := &dirCache{}
d.Flush()
return d
}
@@ -210,6 +150,97 @@ func (f *FsDrive) String() string {
return fmt.Sprintf("Google drive root '%s'", f.root)
}
// Start a call to the drive API
//
// This must be called as a pair with endCall
//
// This waits for the pacer token
func (f *FsDrive) beginCall() {
// pacer starts with a token in and whenever we take one out
// sleepTime later we put another in. We could do this with a
// Ticker more accurately, but then we'd have to work out how
// not to run it when it wasn't needed
<-f.pacer
// Restart the timer
go func(t time.Duration) {
// fs.Debug(f, "New sleep for %v at %v", t, time.Now())
time.Sleep(t)
f.pacer <- struct{}{}
}(f.sleepTime)
}
// End a call to the drive API
//
// Refresh the pace given an error that was returned. It returns a
// boolean as to whether the operation should be retried.
//
// See https://developers.google.com/drive/web/handle-errors
// http://stackoverflow.com/questions/18529524/403-rate-limit-after-only-1-insert-per-second
func (f *FsDrive) endCall(err error) bool {
again := false
oldSleepTime := f.sleepTime
if err == nil {
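// Decay the sleep time on success: sleepTime * (2^decayConstant - 1) / 2^decayConstant,
// ie multiply by 3/4 with decayConstant == 2, clamped to minSleep below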
f.sleepTime = (f.sleepTime<<decayConstant - f.sleepTime) >> decayConstant
if f.sleepTime < minSleep {
f.sleepTime = minSleep
}
if f.sleepTime != oldSleepTime {
fs.Debug(f, "Reducing sleep to %v", f.sleepTime)
}
} else {
fs.Debug(f, "Error recived: %T %#v", err, err)
// Check for net error Timeout()
if x, ok := err.(interface {
Timeout() bool
}); ok && x.Timeout() {
again = true
}
// Check for net error Temporary()
if x, ok := err.(interface {
Temporary() bool
}); ok && x.Temporary() {
again = true
}
switch gerr := err.(type) {
case *googleapi.Error:
if gerr.Code >= 500 && gerr.Code < 600 {
// All 5xx errors should be retried
again = true
} else if len(gerr.Errors) > 0 {
reason := gerr.Errors[0].Reason
if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
again = true
}
}
}
}
if again {
f.sleepTime *= 2
if f.sleepTime > maxSleep {
f.sleepTime = maxSleep
}
if f.sleepTime != oldSleepTime {
fs.Debug(f, "Rate limited, increasing sleep to %v", f.sleepTime)
}
}
return again
}
// Pace the remote operations to not exceed Google's limits and retry
// on 403 rate limit exceeded
//
// This calls fn, expecting it to place its error in perr
func (f *FsDrive) call(perr *error, fn func()) {
for {
f.beginCall()
fn()
if !f.endCall(*perr) {
break
}
}
}
// parseDrivePath parses a drive 'url'
func parseDrivePath(path string) (root string, err error) {
root = strings.Trim(path, "/")
@@ -247,7 +278,10 @@ func (f *FsDrive) listAll(dirId string, title string, directoriesOnly bool, file
list := f.svc.Files.List().Q(query).MaxResults(1000)
OUTER:
for {
files, err := list.Do()
var files *drive.FileList
f.call(&err, func() {
files, err = list.Do()
})
if err != nil {
return false, fmt.Errorf("Couldn't list directory: %s", err)
}
@@ -265,39 +299,28 @@ OUTER:
return
}
// Makes a new drive transport from the config
func newDriveTransport(name string) (*oauth.Transport, error) {
clientId := fs.ConfigFile.MustValue(name, "client_id")
if clientId == "" {
clientId = rcloneClientId
// Returns true if x is a power of 2 or zero
func isPowerOfTwo(x int64) bool {
switch {
case x == 0:
return true
case x < 0:
return false
default:
return (x & (x - 1)) == 0
}
clientSecret := fs.ConfigFile.MustValue(name, "client_secret")
if clientSecret == "" {
clientSecret = rcloneClientSecret
}
// Settings for authorization.
var driveConfig = &oauth.Config{
ClientId: clientId,
ClientSecret: clientSecret,
Scope: "https://www.googleapis.com/auth/drive",
RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
AuthURL: "https://accounts.google.com/o/oauth2/auth",
TokenURL: "https://accounts.google.com/o/oauth2/token",
TokenCache: tokenCache(name),
}
t := &oauth.Transport{
Config: driveConfig,
Transport: http.DefaultTransport,
}
return t, nil
}
// NewFs constructs an FsDrive from the path, container:path
func NewFs(name, path string) (fs.Fs, error) {
t, err := newDriveTransport(name)
if !isPowerOfTwo(int64(chunkSize)) {
return nil, fmt.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
}
if chunkSize < 256*1024 {
return nil, fmt.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
}
t, err := driveAuth.NewTransport(name)
if err != nil {
return nil, err
}
@@ -306,17 +329,16 @@ func NewFs(name, path string) (fs.Fs, error) {
if err != nil {
return nil, err
}
f := &FsDrive{
root: root,
dirCache: newDirCache(),
root: root,
dirCache: newDirCache(),
pacer: make(chan struct{}, 1),
sleepTime: minSleep,
}
// Try to pull the token from the cache; if this fails, we need to get one.
token, err := t.Config.TokenCache.Token()
if err != nil {
return nil, fmt.Errorf("Failed to get token: %s", err)
}
t.Token = token
// Put the first pacing token in
f.pacer <- struct{}{}
// Create a new authorized Drive client.
f.client = t.Client()
@@ -326,15 +348,15 @@ func NewFs(name, path string) (fs.Fs, error) {
}
// Read About so we know the root path
f.about, err = f.svc.About.Get().Do()
f.call(&err, func() {
f.about, err = f.svc.About.Get().Do()
})
if err != nil {
return nil, fmt.Errorf("Couldn't read info about Drive: %s", err)
}
// Find the Id of the root directory and the Id of its parent
f.rootId = f.about.RootFolderId
// Put the root directory in
f.dirCache.Put("", f.rootId)
// Find the Id of the true root and clear everything
f.resetRoot()
// Find the current root
err = f.findRoot(false)
if err != nil {
@@ -348,7 +370,7 @@ func NewFs(name, path string) (fs.Fs, error) {
// No root so return old f
return f, nil
}
obj, err := newF.newFsObjectWithInfo(remote, nil)
obj, err := newF.newFsObjectWithInfoErr(remote, nil)
if err != nil {
// File doesn't exist so return old f
return f, nil
@@ -361,7 +383,7 @@ func NewFs(name, path string) (fs.Fs, error) {
}
// Return an FsObject from a path
func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
func (f *FsDrive) newFsObjectWithInfoErr(remote string, info *drive.File) (fs.Object, error) {
fs := &FsObjectDrive{
drive: f,
remote: remote,
@@ -381,8 +403,8 @@ func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) (fs.Objec
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDrive) NewFsObjectWithInfo(remote string, info *drive.File) fs.Object {
fs, _ := f.newFsObjectWithInfo(remote, info)
func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) fs.Object {
fs, _ := f.newFsObjectWithInfoErr(remote, info)
// Errors have already been logged
return fs
}
@@ -391,7 +413,7 @@ func (f *FsDrive) NewFsObjectWithInfo(remote string, info *drive.File) fs.Object
//
// May return nil if an error occurred
func (f *FsDrive) NewFsObject(remote string) fs.Object {
return f.NewFsObjectWithInfo(remote, nil)
return f.newFsObjectWithInfo(remote, nil)
}
// Path should be directory path either "" or "path/"
@@ -415,7 +437,7 @@ func (f *FsDrive) listDirRecursive(dirId string, path string, out fs.ObjectsChan
} else {
// If item has no MD5 sum it isn't stored on drive, so ignore it
if item.Md5Checksum != "" {
if fs := f.NewFsObjectWithInfo(path+item.Title, item); fs != nil {
if fs := f.newFsObjectWithInfo(path+item.Title, item); fs != nil {
out <- fs
}
}
@@ -465,7 +487,7 @@ func (f *FsDrive) listDirFull(dirId string, path string, out fs.ObjectsChan) err
// fmt.Printf("file %s %s %s\n", path, item.Title, item.Id)
// If item has no MD5 sum it isn't stored on drive, so ignore it
if item.Md5Checksum != "" {
if fs := f.NewFsObjectWithInfo(path, item); fs != nil {
if fs := f.newFsObjectWithInfo(path, item); fs != nil {
out <- fs
}
}
@@ -588,15 +610,18 @@ func (f *FsDrive) _findDir(path string, create bool) (pathId string, err error)
if create {
// fmt.Println("Making", path)
// Define the metadata for the directory we are going to create.
info := &drive.File{
createInfo := &drive.File{
Title: leaf,
Description: leaf,
MimeType: driveFolderType,
Parents: []*drive.ParentReference{{Id: pathId}},
}
info, err := f.svc.Files.Insert(info).Do()
var info *drive.File
f.call(&err, func() {
info, err = f.svc.Files.Insert(createInfo).Do()
})
if err != nil {
return pathId, fmt.Errorf("Failed to make directory")
return pathId, fmt.Errorf("Failed to make directory: %v", err)
}
pathId = info.Id
} else {
@@ -634,6 +659,20 @@ func (f *FsDrive) findRoot(create bool) error {
return nil
}
// Resets the root directory to the absolute root and clears the dirCache
func (f *FsDrive) resetRoot() {
f.findRootLock.Lock()
defer f.findRootLock.Unlock()
f.foundRoot = false
f.dirCache.Flush()
// Put the true root in
f.rootId = f.about.RootFolderId
// Put the root directory in
f.dirCache.Put("", f.rootId)
}
// Walk the path returning a channel of FsObjects
func (f *FsDrive) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
@@ -642,16 +681,16 @@ func (f *FsDrive) List() fs.ObjectsChan {
err := f.findRoot(false)
if err != nil {
fs.Stats.Error()
log.Printf("Couldn't find root: %s", err)
fs.Log(f, "Couldn't find root: %s", err)
} else {
if *driveFullList {
if f.root == "" && *driveFullList {
err = f.listDirFull(f.rootId, "", out)
} else {
err = f.listDirRecursive(f.rootId, "", out)
}
if err != nil {
fs.Stats.Error()
log.Printf("List failed: %s", err)
fs.Log(f, "List failed: %s", err)
}
}
}()
@@ -666,7 +705,7 @@ func (f *FsDrive) ListDir() fs.DirChan {
err := f.findRoot(false)
if err != nil {
fs.Stats.Error()
log.Printf("Couldn't find root: %s", err)
fs.Log(f, "Couldn't find root: %s", err)
} else {
_, err := f.listAll(f.rootId, "", true, false, func(item *drive.File) bool {
dir := &fs.Dir{
@@ -674,13 +713,13 @@ func (f *FsDrive) ListDir() fs.DirChan {
Bytes: -1,
Count: -1,
}
dir.When, _ = time.Parse(time.RFC3339, item.ModifiedDate)
dir.When, _ = time.Parse(timeFormatIn, item.ModifiedDate)
out <- dir
return false
})
if err != nil {
fs.Stats.Error()
log.Printf("ListDir failed: %s", err)
fs.Log(f, "ListDir failed: %s", err)
}
}
}()
@@ -706,26 +745,33 @@ func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64
return o, fmt.Errorf("Couldn't find or make directory: %s", err)
}
// Guess the mime type
mimeType := mime.TypeByExtension(path.Ext(o.remote))
if mimeType == "" {
mimeType = "application/octet-stream"
}
modifiedDate := modTime.Format(time.RFC3339Nano)
// Define the metadata for the file we are going to create.
info := &drive.File{
createInfo := &drive.File{
Title: leaf,
Description: leaf,
Parents: []*drive.ParentReference{{Id: directoryId}},
MimeType: mimeType,
ModifiedDate: modifiedDate,
MimeType: fs.MimeType(o),
ModifiedDate: modTime.Format(timeFormatOut),
}
// Make the API request to upload metadata and file data.
info, err = f.svc.Files.Insert(info).Media(in).Do()
if err != nil {
return o, fmt.Errorf("Upload failed: %s", err)
var info *drive.File
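// Empty files and files below driveUploadCutoff are sent with a
// single multipart insert; anything larger goes via the resumable
// chunked uploader in upload.go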
if size == 0 || size < int64(driveUploadCutoff) {
// Make the API request to upload metadata and file data.
// Don't retry, return a retry error instead
f.beginCall()
info, err = f.svc.Files.Insert(createInfo).Media(in).Do()
if f.endCall(err) {
return o, fs.RetryErrorf("Upload failed - retry: %s", err)
}
if err != nil {
return o, fmt.Errorf("Upload failed: %s", err)
}
} else {
// Upload the file in chunks
info, err = f.Upload(in, size, createInfo.MimeType, createInfo, remote)
if err != nil {
return o, err
}
}
o.setMetaData(info)
return o, nil
@@ -744,7 +790,10 @@ func (f *FsDrive) Rmdir() error {
if err != nil {
return err
}
children, err := f.svc.Children.List(f.rootId).MaxResults(10).Do()
var children *drive.ChildList
f.call(&err, func() {
children, err = f.svc.Children.List(f.rootId).MaxResults(10).Do()
})
if err != nil {
return err
}
@@ -753,11 +802,14 @@ func (f *FsDrive) Rmdir() error {
}
// Delete the directory if it isn't the root
if f.root != "" {
err = f.svc.Files.Delete(f.rootId).Do()
f.call(&err, func() {
err = f.svc.Files.Delete(f.rootId).Do()
})
if err != nil {
return err
}
}
f.resetRoot()
return nil
}
@@ -768,7 +820,9 @@ func (fs *FsDrive) Precision() time.Duration {
// Purge deletes all the files and the container
//
// Returns an error if it isn't empty
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsDrive) Purge() error {
if f.root == "" {
return fmt.Errorf("Can't purge root directory")
@@ -777,7 +831,10 @@ func (f *FsDrive) Purge() error {
if err != nil {
return err
}
err = f.svc.Files.Delete(f.rootId).Do()
f.call(&err, func() {
err = f.svc.Files.Delete(f.rootId).Do()
})
f.resetRoot()
if err != nil {
return err
}
@@ -864,7 +921,7 @@ func (o *FsObjectDrive) ModTime() time.Time {
fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
modTime, err := time.Parse(time.RFC3339, o.modifiedDate)
modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %s", err)
return time.Now()
@@ -881,15 +938,21 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) {
return
}
// New metadata
info := &drive.File{
ModifiedDate: modTime.Format(time.RFC3339Nano),
updateInfo := &drive.File{
ModifiedDate: modTime.Format(timeFormatOut),
}
// Set modified date
_, err = o.drive.svc.Files.Update(o.id, info).SetModifiedDate(true).Do()
var info *drive.File
o.drive.call(&err, func() {
info, err = o.drive.svc.Files.Update(o.id, updateInfo).SetModifiedDate(true).Do()
})
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
return
}
// Update info from read data
o.setMetaData(info)
}
// Is this object storable
@@ -899,14 +962,20 @@ func (o *FsObjectDrive) Storable() bool {
// Open an object for read
func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
req, _ := http.NewRequest("GET", o.url, nil)
req.Header.Set("User-Agent", "rclone/1.0")
res, err := o.drive.client.Do(req)
req, err := http.NewRequest("GET", o.url, nil)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", fs.UserAgent)
var res *http.Response
o.drive.call(&err, func() {
res, err = o.drive.client.Do(req)
})
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
res.Body.Close()
_ = res.Body.Close() // ignore error
return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
@@ -918,15 +987,30 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
//
// The new object may have been created if an error is returned
func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) error {
info := &drive.File{
updateInfo := &drive.File{
Id: o.id,
ModifiedDate: modTime.Format(time.RFC3339Nano),
ModifiedDate: modTime.Format(timeFormatOut),
}
// Make the API request to upload metadata and file data.
info, err := o.drive.svc.Files.Update(info.Id, info).SetModifiedDate(true).Media(in).Do()
if err != nil {
return fmt.Errorf("Update failed: %s", err)
var err error
var info *drive.File
if size == 0 || size < int64(driveUploadCutoff) {
// Don't retry, return a retry error instead
o.drive.beginCall()
info, err = o.drive.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in).Do()
if o.drive.endCall(err) {
return fs.RetryErrorf("Update failed - retry: %s", err)
}
if err != nil {
return fmt.Errorf("Update failed: %s", err)
}
} else {
// Upload the file in chunks
info, err = o.drive.Upload(in, size, fs.MimeType(o), updateInfo, o.remote)
if err != nil {
return err
}
}
o.setMetaData(info)
return nil
@@ -934,7 +1018,11 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro
// Remove an object
func (o *FsObjectDrive) Remove() error {
return o.drive.svc.Files.Delete(o.id).Do()
var err error
o.drive.call(&err, func() {
err = o.drive.svc.Files.Delete(o.id).Do()
})
return err
}
// Check the interfaces are satisfied

drive/drive_test.go Normal file

@@ -0,0 +1,53 @@
// Test Drive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
package drive_test
import (
"testing"
"github.com/ncw/rclone/drive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func init() {
fstests.NilObject = fs.Object((*drive.FsObjectDrive)(nil))
fstests.RemoteName = "TestDrive:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

drive/upload.go Normal file

@@ -0,0 +1,246 @@
// Upload for drive
//
// Docs
// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
// Files insert: https://developers.google.com/drive/v2/reference/files/insert
// Files update: https://developers.google.com/drive/v2/reference/files/update
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
package drive
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/ncw/rclone/fs"
"google.golang.org/api/drive/v2"
"google.golang.org/api/googleapi"
)
const (
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
statusResumeIncomplete = 308
// Number of times to try each chunk
maxTries = 10
)
// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
f *FsDrive
remote string
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
URI string
// Media is the object being uploaded.
Media io.Reader
// MediaType defines the media type, e.g. "image/jpeg".
MediaType string
// ContentLength is the full size of the object being uploaded.
ContentLength int64
// Return value
ret *drive.File
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) {
fileId := info.Id
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(info)
if err != nil {
return nil, err
}
params := make(url.Values)
params.Set("alt", "json")
params.Set("uploadType", "resumable")
urls := "https://www.googleapis.com/upload/drive/v2/files"
method := "POST"
if fileId != "" {
params.Set("setModifiedDate", "true")
urls += "/{fileId}"
method = "PUT"
}
urls += "?" + params.Encode()
req, _ := http.NewRequest(method, urls, body)
googleapi.Expand(req.URL, map[string]string{
"fileId": fileId,
})
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
req.Header.Set("X-Upload-Content-Type", contentType)
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
req.Header.Set("User-Agent", fs.UserAgent)
var res *http.Response
f.call(&err, func() {
res, err = f.client.Do(req)
if err == nil {
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
})
if err != nil {
return nil, err
}
loc := res.Header.Get("Location")
rx := &resumableUpload{
f: f,
remote: remote,
URI: loc,
Media: in,
MediaType: contentType,
ContentLength: size,
}
return rx.Upload()
}
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(start int64, body []byte) *http.Request {
reqSize := int64(len(body))
req, _ := http.NewRequest("POST", rx.URI, bytes.NewBuffer(body))
req.ContentLength = reqSize
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
} else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
}
req.Header.Set("Content-Type", rx.MediaType)
req.Header.Set("User-Agent", fs.UserAgent)
return req
}
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, fmt.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, fmt.Errorf("unable to parse range %q", Range)
}
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, body []byte) (int, error) {
req := rx.makeRequest(start, body)
res, err := rx.f.client.Do(req)
if err != nil {
return 599, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == statusResumeIncomplete {
return res.StatusCode, nil
}
err = googleapi.CheckResponse(res)
if err != nil {
return res.StatusCode, err
}
// When the entire file upload is complete, the server
// responds with an HTTP 201 Created along with any metadata
// associated with this resource. If this request had been
// updating an existing entity rather than creating a new one,
// the HTTP response code for a completed upload would have
// been 200 OK.
//
// So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionally on
// StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err
}
return res.StatusCode, nil
}
// Upload uploads the chunks from the input
// It retries each chunk up to maxTries times, pacing the attempts with the drive pacer.
func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
buf := make([]byte, chunkSize)
var StatusCode int
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
} else {
buf = buf[:reqSize]
}
// Read the chunk
_, err := io.ReadFull(rx.Media, buf)
if err != nil {
return nil, err
}
// Transfer the chunk
for try := 1; try <= maxTries; try++ {
fs.Debug(rx.remote, "Sending chunk %d length %d, %d/%d", start, reqSize, try, maxTries)
rx.f.beginCall()
StatusCode, err = rx.transferChunk(start, buf)
rx.f.endCall(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
goto success
}
fs.Debug(rx.remote, "Retrying chunk %d/%d, code=%d, err=%v", try, maxTries, StatusCode, err)
}
fs.Debug(rx.remote, "Failed to send chunk")
return nil, fs.RetryErrorf("Chunk upload failed - retry: code=%d, err=%v", StatusCode, err)
success:
start += reqSize
}
// Resume or retry uploads that fail due to connection interruptions or
// any 5xx errors, including:
//
// 500 Internal Server Error
// 502 Bad Gateway
// 503 Service Unavailable
// 504 Gateway Timeout
//
// Use an exponential backoff strategy if any 5xx server error is
// returned when resuming or retrying upload requests. These errors can
// occur if a server is getting overloaded. Exponential backoff can help
// alleviate these kinds of problems during periods of high volume of
// requests or heavy network traffic. Other kinds of requests should not
// be handled by exponential backoff but you can still retry a number of
// them. When retrying these requests, limit the number of times you
// retry them. For example your code could limit to ten retries or less
// before reporting an error.
//
// Handle 404 Not Found errors when doing resumable uploads by starting
// the entire upload over from the beginning.
if rx.ret == nil {
return nil, fs.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
}
return rx.ret, nil
}

dropbox/dropbox.go Normal file

@@ -0,0 +1,756 @@
// Dropbox interface
package dropbox
/*
Limitations of dropbox
File system is case insensitive
The datastore is limited to 100,000 records which therefore is the
limit of the number of files that rclone can use on dropbox.
FIXME only open datastore if we need it?
FIXME Getting this sometimes
Failed to copy: Upload failed: invalid character '<' looking for beginning of value
This is a JSON decode error - from Update / UploadByChunk
- Caused by 500 error from dropbox
- See https://github.com/stacktic/dropbox/issues/1
- Possibly confusing dropbox with excess concurrency?
FIXME implement timeouts - need to get "github.com/stacktic/dropbox"
and hence "golang.org/x/oauth2" which uses DefaultTransport unless it
is set in the context passed into .Client()
func (db *Dropbox) client() *http.Client {
return db.config.Client(oauth2.NoContext, db.token)
}
// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
var HTTPClient ContextKey
So pass in a context with HTTPClient set...
*/
import (
"crypto/md5"
"errors"
"fmt"
"io"
"log"
"path"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/stacktic/dropbox"
)
// Constants
const (
rcloneAppKey = "5jcck7diasz0rqy"
rcloneAppSecret = "1n9m04y2zx7bf26"
uploadChunkSize = 64 * 1024 // chunk size for upload
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
datastoreName = "rclone"
tableName = "metadata"
md5sumField = "md5sum"
mtimeField = "mtime"
maxCommitRetries = 5
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
)
// Register with Fs
func init() {
fs.Register(&fs.FsInfo{
Name: "dropbox",
NewFs: NewFs,
Config: configHelper,
Options: []fs.Option{{
Name: "app_key",
Help: "Dropbox App Key - leave blank to use rclone's.",
}, {
Name: "app_secret",
Help: "Dropbox App Secret - leave blank to use rclone's.",
}},
})
}
// Configuration helper - called after the user has put in the defaults
func configHelper(name string) {
// See if already have a token
token := fs.ConfigFile.MustValue(name, "token")
if token != "" {
fmt.Printf("Already have a dropbox token - refresh?\n")
if !fs.Confirm() {
return
}
}
// Get a dropbox
db := newDropbox(name)
// This method will ask the user to visit a URL and paste the generated code.
if err := db.Auth(); err != nil {
log.Fatalf("Failed to authorize: %v", err)
}
// Get the token
token = db.AccessToken()
// Stuff it in the config file if it has changed
old := fs.ConfigFile.MustValue(name, "token")
if token != old {
fs.ConfigFile.SetValue(name, "token", token)
fs.SaveConfig()
}
}
// FsDropbox represents a remote dropbox server
type FsDropbox struct {
db *dropbox.Dropbox // the connection to the dropbox server
root string // the path we are working on
slashRoot string // root with "/" prefix
slashRootSlash string // root with "/" prefix and postfix
datastoreManager *dropbox.DatastoreManager
datastore *dropbox.Datastore
table *dropbox.Table
datastoreMutex sync.Mutex // lock this when using the datastore
datastoreErr error // pending errors on the datastore
}
// FsObjectDropbox describes a dropbox object
type FsObjectDropbox struct {
dropbox *FsDropbox // what this object is part of
remote string // The remote path
md5sum string // md5sum of the object
bytes int64 // size of the object
modTime time.Time // time it was last modified
}
// ------------------------------------------------------------
// String converts this FsDropbox to a string
func (f *FsDropbox) String() string {
return fmt.Sprintf("Dropbox root '%s'", f.root)
}
// Makes a new dropbox from the config
func newDropbox(name string) *dropbox.Dropbox {
db := dropbox.NewDropbox()
appKey := fs.ConfigFile.MustValue(name, "app_key")
if appKey == "" {
appKey = rcloneAppKey
}
appSecret := fs.ConfigFile.MustValue(name, "app_secret")
if appSecret == "" {
appSecret = rcloneAppSecret
}
db.SetAppInfo(appKey, appSecret)
return db
}
// NewFs constructs an FsDropbox from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
db := newDropbox(name)
f := &FsDropbox{
db: db,
}
f.setRoot(root)
// Read the token from the config file
token := fs.ConfigFile.MustValue(name, "token")
// Authorize the client
db.SetAccessToken(token)
// Make a db to store rclone metadata in
f.datastoreManager = db.NewDatastoreManager()
// Open the datastore in the background
go f.openDataStore()
// See if the root is actually an object
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
if err == nil && !entry.IsDir {
remote := path.Base(f.root)
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
}
return f, nil
}
// Sets root in f
func (f *FsDropbox) setRoot(root string) {
f.root = strings.Trim(root, "/")
f.slashRoot = "/" + f.root
f.slashRootSlash = f.slashRoot
if f.root != "" {
f.slashRootSlash += "/"
}
}
// Opens the datastore in f
func (f *FsDropbox) openDataStore() {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
fs.Debug(f, "Open rclone datastore")
// Open the rclone datastore
var err error
f.datastore, err = f.datastoreManager.OpenDatastore(datastoreName)
if err != nil {
fs.Log(f, "Failed to open datastore: %v", err)
f.datastoreErr = err
return
}
// Get the table we are using
f.table, err = f.datastore.GetTable(tableName)
if err != nil {
fs.Log(f, "Failed to open datastore table: %v", err)
f.datastoreErr = err
return
}
fs.Debug(f, "Open rclone datastore finished")
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
o := &FsObjectDropbox{
dropbox: f,
remote: remote,
}
if info != nil {
o.setMetadataFromEntry(info)
} else {
err := o.readEntryAndSetMetadata()
if err != nil {
// logged already fs.Debug("Failed to read info: %s", err)
return nil
}
}
return o
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}
// Strips the root off entry and returns it
func (f *FsDropbox) stripRoot(entry *dropbox.Entry) string {
path := entry.Path
if strings.HasPrefix(path, f.slashRootSlash) {
path = path[len(f.slashRootSlash):]
}
return path
}
// Walk the root returning a channel of FsObjects
func (f *FsDropbox) list(out fs.ObjectsChan) {
cursor := ""
for {
deltaPage, err := f.db.Delta(cursor, f.slashRoot)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list: %s", err)
break
} else {
if deltaPage.Reset && cursor != "" {
fs.Log(f, "Unexpected reset during listing - try again")
fs.Stats.Error()
break
}
fs.Debug(f, "%d delta entries received", len(deltaPage.Entries))
for i := range deltaPage.Entries {
deltaEntry := &deltaPage.Entries[i]
entry := deltaEntry.Entry
if entry == nil {
// This notifies of a deleted object
fs.Debug(f, "Deleting metadata for %q", deltaEntry.Path)
key := metadataKey(deltaEntry.Path) // Path is lowercased
err := f.deleteMetadata(key)
if err != nil {
fs.Debug(f, "Failed to delete metadata for %q", deltaEntry.Path)
// Don't accumulate Error here
}
} else {
if entry.IsDir {
// ignore directories
} else {
path := f.stripRoot(entry)
out <- f.newFsObjectWithInfo(path, entry)
}
}
}
if !deltaPage.HasMore {
break
}
cursor = deltaPage.Cursor.Cursor
}
}
}
// Walk the path returning a channel of FsObjects
func (f *FsDropbox) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
defer close(out)
f.list(out)
}()
return out
}
// Walk the path returning a channel of FsObjects
func (f *FsDropbox) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
entry, err := f.db.Metadata(f.root, true, false, "", "", metadataLimit)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list directories in root: %s", err)
} else {
for i := range entry.Contents {
entry := &entry.Contents[i]
if entry.IsDir {
out <- &fs.Dir{
Name: f.stripRoot(entry),
When: time.Time(entry.ClientMtime),
Bytes: int64(entry.Bytes),
Count: -1,
}
}
}
}
}()
return out
}
// A read closer which doesn't close the input
type readCloser struct {
in io.Reader
}
// Read bytes from the object - see io.Reader
func (rc *readCloser) Read(p []byte) (n int, err error) {
return rc.in.Read(p)
}
// Dummy close function
func (rc *readCloser) Close() error {
return nil
}
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsDropbox) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
o := &FsObjectDropbox{dropbox: f, remote: remote}
return o, o.Update(in, modTime, size)
}
// Mkdir creates the container if it doesn't exist
func (f *FsDropbox) Mkdir() error {
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
if err == nil {
if entry.IsDir {
return nil
}
return fmt.Errorf("%q already exists as file", f.root)
}
_, err = f.db.CreateFolder(f.slashRoot)
return err
}
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *FsDropbox) Rmdir() error {
entry, err := f.db.Metadata(f.slashRoot, true, false, "", "", 16)
if err != nil {
return err
}
if len(entry.Contents) != 0 {
return errors.New("Directory not empty")
}
return f.Purge()
}
// Return the precision
func (fs *FsDropbox) Precision() time.Duration {
return time.Nanosecond
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsDropbox) Purge() error {
// Delete metadata first
var wg sync.WaitGroup
to_be_deleted := f.List()
wg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
go func() {
defer wg.Done()
for dst := range to_be_deleted {
o := dst.(*FsObjectDropbox)
o.deleteMetadata()
}
}()
}
wg.Wait()
// Let dropbox delete the filesystem tree
_, err := f.db.Delete(f.slashRoot)
return err
}
// Tries the transaction in fn then calls commit, repeating until retry limit
//
// Holds datastore mutex while in progress
func (f *FsDropbox) transaction(fn func() error) error {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
if f.datastoreErr != nil {
return f.datastoreErr
}
var err error
for i := 1; i <= maxCommitRetries; i++ {
err = fn()
if err != nil {
return err
}
err = f.datastore.Commit()
if err == nil {
break
}
fs.Debug(f, "Retrying transaction %d/%d", i, maxCommitRetries)
}
if err != nil {
return fmt.Errorf("Failed to commit metadata changes: %s", err)
}
return nil
}
// Deletes the metadata associated with this key
func (f *FsDropbox) deleteMetadata(key string) error {
return f.transaction(func() error {
record, err := f.table.Get(key)
if err != nil {
return fmt.Errorf("Couldn't get record: %s", err)
}
if record == nil {
return nil
}
record.DeleteRecord()
return nil
})
}
// Reads the record attached to key
//
// Holds datastore mutex while in progress
func (f *FsDropbox) readRecord(key string) (*dropbox.Record, error) {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
if f.datastoreErr != nil {
return nil, f.datastoreErr
}
return f.table.Get(key)
}
// ------------------------------------------------------------
// Return the parent Fs
func (o *FsObjectDropbox) Fs() fs.Fs {
return o.dropbox
}
// Return a string version
func (o *FsObjectDropbox) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Return the remote path
func (o *FsObjectDropbox) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
//
// FIXME has to download the file!
func (o *FsObjectDropbox) Md5sum() (string, error) {
if o.md5sum != "" {
return o.md5sum, nil
}
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return "", fmt.Errorf("Failed to read metadata: %s", err)
}
// For pre-existing files which have no md5sum we could read the file and set it?
// in, err := o.Open()
// if err != nil {
// return "", err
// }
// defer in.Close()
// hash := md5.New()
// _, err = io.Copy(hash, in)
// if err != nil {
// return "", err
// }
// o.md5sum = fmt.Sprintf("%x", hash.Sum(nil))
return o.md5sum, nil
}
// Size returns the size of an object in bytes
func (o *FsObjectDropbox) Size() int64 {
return o.bytes
}
// setMetadataFromEntry sets the fs data from a dropbox.Entry
//
// This isn't a complete set of metadata and has an inaccurate date
func (o *FsObjectDropbox) setMetadataFromEntry(info *dropbox.Entry) {
o.bytes = int64(info.Bytes)
o.modTime = time.Time(info.ClientMtime)
}
// Reads the entry from dropbox
func (o *FsObjectDropbox) readEntry() (*dropbox.Entry, error) {
entry, err := o.dropbox.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit)
if err != nil {
fs.Debug(o, "Error reading file: %s", err)
return nil, fmt.Errorf("Error reading file: %s", err)
}
return entry, nil
}
// Read entry if not set and set metadata from it
func (o *FsObjectDropbox) readEntryAndSetMetadata() error {
// Last resort set time from client
if !o.modTime.IsZero() {
return nil
}
entry, err := o.readEntry()
if err != nil {
return err
}
o.setMetadataFromEntry(entry)
return nil
}
// Returns the remote path for the object
func (o *FsObjectDropbox) remotePath() string {
return o.dropbox.slashRootSlash + o.remote
}
// Returns the key for the metadata database for a given path
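// (the path is lowercased first, so eg "/Backup/A.txt" and "/backup/a.txt"
// map to the same key)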
func metadataKey(path string) string {
// NB File system is case insensitive
path = strings.ToLower(path)
hash := md5.New()
_, _ = hash.Write([]byte(path))
return fmt.Sprintf("%x", hash.Sum(nil))
}
// Returns the key for the metadata database
func (o *FsObjectDropbox) metadataKey() string {
return metadataKey(o.remotePath())
}
// readMetaData gets the info if it hasn't already been fetched
func (o *FsObjectDropbox) readMetaData() (err error) {
if o.md5sum != "" {
return nil
}
// fs.Debug(o, "Reading metadata from datastore")
record, err := o.dropbox.readRecord(o.metadataKey())
if err != nil {
fs.Debug(o, "Couldn't read metadata: %s", err)
record = nil
}
if record != nil {
// Read md5sum
md5sumInterface, ok, err := record.Get(md5sumField)
if err != nil {
return err
}
if !ok {
fs.Debug(o, "Couldn't find md5sum in record")
} else {
md5sum, ok := md5sumInterface.(string)
if !ok {
fs.Debug(o, "md5sum not a string")
} else {
o.md5sum = md5sum
}
}
// read mtime
mtimeInterface, ok, err := record.Get(mtimeField)
if err != nil {
return err
}
if !ok {
fs.Debug(o, "Couldn't find mtime in record")
} else {
mtime, ok := mtimeInterface.(string)
if !ok {
fs.Debug(o, "mtime not a string")
} else {
modTime, err := time.Parse(timeFormatIn, mtime)
if err != nil {
return err
}
o.modTime = modTime
}
}
}
// Last resort
return o.readEntryAndSetMetadata()
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectDropbox) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
return o.modTime
}
// Sets the modification time of the local fs object into the record
// FIXME if we don't set md5sum what will that do?
func (o *FsObjectDropbox) setModTimeAndMd5sum(modTime time.Time, md5sum string) error {
key := o.metadataKey()
// fs.Debug(o, "Writing metadata to datastore")
return o.dropbox.transaction(func() error {
record, err := o.dropbox.table.GetOrInsert(key)
if err != nil {
return fmt.Errorf("Couldn't read record: %s", err)
}
if md5sum != "" {
err = record.Set(md5sumField, md5sum)
if err != nil {
return fmt.Errorf("Couldn't set md5sum record: %s", err)
}
o.md5sum = md5sum
}
if !modTime.IsZero() {
mtime := modTime.Format(timeFormatOut)
err := record.Set(mtimeField, mtime)
if err != nil {
return fmt.Errorf("Couldn't set mtime record: %s", err)
}
o.modTime = modTime
}
return nil
})
}
// Deletes the metadata associated with this file
//
// It logs any errors
func (o *FsObjectDropbox) deleteMetadata() {
fs.Debug(o, "Deleting metadata from datastore")
err := o.dropbox.deleteMetadata(o.metadataKey())
if err != nil {
fs.Log(o, "Error deleting metadata: %v", err)
fs.Stats.Error()
}
}
// Sets the modification time of the local fs object
//
// Commits the datastore
func (o *FsObjectDropbox) SetModTime(modTime time.Time) {
err := o.setModTimeAndMd5sum(modTime, "")
if err != nil {
fs.Stats.Error()
fs.Log(o, err.Error())
}
}
// Is this object storable
func (o *FsObjectDropbox) Storable() bool {
return true
}
// Open an object for read
func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
in, _, err = o.dropbox.db.Download(o.remotePath(), "", 0)
return
}
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) error {
// Calculate md5sum as we upload it
hash := md5.New()
rc := &readCloser{in: io.TeeReader(in, hash)}
entry, err := o.dropbox.db.UploadByChunk(rc, uploadChunkSize, o.remotePath(), true, "")
if err != nil {
return fmt.Errorf("Upload failed: %s", err)
}
o.setMetadataFromEntry(entry)
md5sum := fmt.Sprintf("%x", hash.Sum(nil))
return o.setModTimeAndMd5sum(modTime, md5sum)
}
// Remove an object
func (o *FsObjectDropbox) Remove() error {
o.deleteMetadata()
_, err := o.dropbox.db.Delete(o.remotePath())
return err
}
// Check the interfaces are satisfied
var _ fs.Fs = &FsDropbox{}
var _ fs.Purger = &FsDropbox{}
var _ fs.Object = &FsObjectDropbox{}

dropbox/dropbox_test.go Normal file

@@ -0,0 +1,53 @@
// Test Dropbox filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
package dropbox_test
import (
"testing"
"github.com/ncw/rclone/dropbox"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func init() {
fstests.NilObject = fs.Object((*dropbox.FsObjectDropbox)(nil))
fstests.RemoteName = "TestDropbox:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }


@@ -10,13 +10,24 @@ import (
"strings"
"sync"
"time"
"github.com/tsenart/tb"
)
// Globals
var (
Stats = NewStats()
Stats = NewStats()
tokenBucket *tb.Bucket
)
// Start the token bucket if necessary
func startTokenBucket() {
if bwLimit > 0 {
tokenBucket = tb.NewBucket(int64(bwLimit), 100*time.Millisecond)
Log(nil, "Starting bandwidth limiter at %vBytes/s", &bwLimit)
}
}
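// Illustrative note (not part of this change): the bucket refills evenly so
// that roughly bwLimit bytes of tokens become available per second;
// Account.Read below calls tokenBucket.Wait(n) to block until n bytes of
// tokens are free, capping overall throughput at about bwLimit bytes/s.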
// StringSet holds some strings
type StringSet map[string]bool
@@ -113,6 +124,16 @@ func (s *StatsInfo) GetErrors() int64 {
return s.errors
}
// ResetCounters sets the counters (bytes, checks, errors, transfers) to 0
func (s *StatsInfo) ResetCounters() {
s.lock.Lock()
defer s.lock.Unlock()
s.bytes = 0
s.errors = 0
s.checks = 0
s.transfers = 0
}
// Errored returns whether there have been any errors
func (s *StatsInfo) Errored() bool {
s.lock.RLock()
@@ -142,6 +163,13 @@ func (s *StatsInfo) DoneChecking(o Object) {
s.checks += 1
}
// GetTransfers reads the number of transfers
func (s *StatsInfo) GetTransfers() int64 {
s.lock.RLock()
defer s.lock.RUnlock()
return s.transfers
}
// Transferring adds a transfer into the stats
func (s *StatsInfo) Transferring(o Object) {
s.lock.Lock()
@@ -159,6 +187,12 @@ func (s *StatsInfo) DoneTransferring(o Object) {
// Account limits and accounts for one transfer
type Account struct {
// The mutex is to make sure Read() and Close() aren't called
// concurrently. Unfortunately the persistent connection loop
// in http transport calls Read() after Do() returns on
// CancelRequest so this race can happen when it apparently
// shouldn't.
mu sync.Mutex
in io.ReadCloser
bytes int64
}
@@ -172,17 +206,25 @@ func NewAccount(in io.ReadCloser) *Account {
// Read bytes from the object - see io.Reader
func (file *Account) Read(p []byte) (n int, err error) {
file.mu.Lock()
defer file.mu.Unlock()
n, err = file.in.Read(p)
file.bytes += int64(n)
Stats.Bytes(int64(n))
if err == io.EOF {
// FIXME Do something?
}
// Limit the transfer speed if required
if tokenBucket != nil {
tokenBucket.Wait(int64(n))
}
return
}
// Close the object
func (file *Account) Close() error {
file.mu.Lock()
defer file.mu.Unlock()
// FIXME do something?
return file.in.Close()
}
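// --- Illustrative sketch (not part of this changeset) ---
// How the accounting wrapper is meant to be used: wrap the source
// reader in an Account so every Read is counted in Stats and, when
// --bwlimit is set, throttled by the token bucket. The helper name
// copyAccounted is hypothetical.
func copyAccounted(dst io.Writer, src io.ReadCloser) (int64, error) {
	in := NewAccount(src) // counts bytes into Stats and waits on tokenBucket
	defer in.Close()
	return io.Copy(dst, in)
}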


@@ -6,6 +6,8 @@ import (
"bufio"
"fmt"
"log"
"math"
"net/http"
"os"
"os/user"
"path"
@@ -15,6 +17,7 @@ import (
"time"
"github.com/Unknwon/goconfig"
"github.com/mreiferson/go-httpclient"
"github.com/ogier/pflag"
)
@@ -22,6 +25,8 @@ const (
configFileName = ".rclone.conf"
)
type SizeSuffix int64
// Global
var (
// Config file
@@ -33,34 +38,151 @@ var (
// Global config
Config = &ConfigInfo{}
// Flags
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
checkSum = pflag.BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
sizeOnly = pflag.BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
timeout = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
bwLimit SizeSuffix
)
func init() {
pflag.VarP(&bwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix k|M|G")
}
// Turn SizeSuffix into a string
func (x SizeSuffix) String() string {
scaled := float64(0)
suffix := ""
switch {
case x == 0:
return "0"
case x < 1024*1024:
scaled = float64(x) / 1024
suffix = "k"
case x < 1024*1024*1024:
scaled = float64(x) / 1024 / 1024
suffix = "M"
default:
scaled = float64(x) / 1024 / 1024 / 1024
suffix = "G"
}
if math.Floor(scaled) == scaled {
return fmt.Sprintf("%.0f%s", scaled, suffix)
}
return fmt.Sprintf("%.3f%s", scaled, suffix)
}
// Set a SizeSuffix
func (x *SizeSuffix) Set(s string) error {
if len(s) == 0 {
return fmt.Errorf("Empty string")
}
suffix := s[len(s)-1]
suffixLen := 1
var multiplier float64
switch suffix {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
suffixLen = 0
multiplier = 1 << 10
case 'k', 'K':
multiplier = 1 << 10
case 'm', 'M':
multiplier = 1 << 20
case 'g', 'G':
multiplier = 1 << 30
default:
return fmt.Errorf("Bad suffix %q", suffix)
}
s = s[:len(s)-suffixLen]
value, err := strconv.ParseFloat(s, 64)
if err != nil {
return err
}
if value < 0 {
return fmt.Errorf("Size can't be negative %q", s)
}
value *= multiplier
*x = SizeSuffix(value)
return nil
}
// Check it satisfies the interface
var _ pflag.Value = (*SizeSuffix)(nil)
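// --- Illustrative sketch (not part of this changeset) ---
// How the parsing and formatting rules above behave, as a standalone
// program using the exported fs.SizeSuffix:
package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/fs"
)

func main() {
	var bw fs.SizeSuffix
	// A bare number means kBytes; the suffixes k, M and G each scale by 2**10.
	if err := bw.Set("1.5M"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(int64(bw), bw.String()) // prints: 1572864 1.500M
}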
// Filesystem config options
type ConfigInfo struct {
Verbose bool
Quiet bool
DryRun bool
CheckSum bool
SizeOnly bool
ModifyWindow time.Duration
Checkers int
Transfers int
ConnectTimeout time.Duration // Connect timeout
Timeout time.Duration // Data channel timeout
}
// Transport returns an http.RoundTripper with the correct timeouts
func (ci *ConfigInfo) Transport() http.RoundTripper {
return &httpclient.Transport{
Proxy: http.ProxyFromEnvironment,
MaxIdleConnsPerHost: ci.Checkers + ci.Transfers + 1,
// ConnectTimeout, if non-zero, is the maximum amount of time a dial will wait for
// a connect to complete.
ConnectTimeout: ci.ConnectTimeout,
// ResponseHeaderTimeout, if non-zero, specifies the amount of
// time to wait for a server's response headers after fully
// writing the request (including its body, if any). This
// time does not include the time to read the response body.
ResponseHeaderTimeout: ci.Timeout,
// RequestTimeout, if non-zero, specifies the amount of time for the entire
// request to complete (including all of the above timeouts + entire response body).
// This should never be less than the sum total of the above two timeouts.
//RequestTimeout: NOT SET,
// ReadWriteTimeout, if non-zero, will set a deadline for every Read and
// Write operation on the request connection.
ReadWriteTimeout: ci.Timeout,
}
}
// Client returns an http.Client with the correct timeouts
func (ci *ConfigInfo) Client() *http.Client {
return &http.Client{
Transport: ci.Transport(),
}
}
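// --- Illustrative sketch (not part of this changeset) ---
// A backend wanting the configured timeouts should build its HTTP
// client from the global config rather than using http.DefaultClient,
// so --contimeout and --timeout take effect. The fetch helper below is
// hypothetical.
func fetch(url string) (*http.Response, error) {
	client := Config.Client() // honours --contimeout and --timeout
	return client.Get(url)
}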
// Find the home directory in which to store the config file
func configHome() string {
// Find the user's home directory
usr, err := user.Current()
if err == nil {
return usr.HomeDir
}
// Fall back to reading $HOME - work around user.Current() not
// working for cross compiled binaries on OSX.
// https://github.com/golang/go/issues/6376
home := os.Getenv("HOME")
if home != "" {
return home
}
log.Printf("Couldn't find home directory or read HOME environment variable.")
log.Printf("Defaulting to storing config in current directory.")
log.Printf("Use -config flag to workaround.")
log.Printf("Error was: %v", err)
return ""
}
// Loads the config file
@@ -74,6 +196,10 @@ func LoadConfig() {
Config.Checkers = *checkers
Config.Transfers = *transfers
Config.DryRun = *dryRun
Config.Timeout = *timeout
Config.ConnectTimeout = *connectTimeout
Config.CheckSum = *checkSum
Config.SizeOnly = *sizeOnly
ConfigPath = *configFile
@@ -87,6 +213,9 @@ func LoadConfig() {
log.Fatalf("Failed to read null config file: %v", err)
}
}
// Start the token bucket limiter
startTokenBucket()
}
// Save configuration file.
@@ -320,9 +449,20 @@ func EditConfig() {
name := ChooseRemote()
EditRemote(name)
case 'n':
fmt.Printf("name> ")
name := ReadLine()
NewRemote(name)
nameLoop:
for {
fmt.Printf("name> ")
name := ReadLine()
switch {
case name == "":
fmt.Printf("Can't use empty name\n")
case isDriveLetter(name):
fmt.Printf("Can't use %q as it can be confused a drive letter\n", name)
default:
NewRemote(name)
break nameLoop
}
}
case 'd':
name := ChooseRemote()
DeleteRemote(name)

fs/config_test.go Normal file (57 lines)

@@ -0,0 +1,57 @@
package fs
import "testing"
func TestSizeSuffixString(t *testing.T) {
for _, test := range []struct {
in float64
want string
}{
{0, "0"},
{102, "0.100k"},
{1024, "1k"},
{1024 * 1024, "1M"},
{1024 * 1024 * 1024, "1G"},
{10 * 1024 * 1024 * 1024, "10G"},
{10.1 * 1024 * 1024 * 1024, "10.100G"},
} {
ss := SizeSuffix(test.in)
got := ss.String()
if test.want != got {
t.Errorf("Want %v got %v", test.want, got)
}
}
}
func TestSizeSuffixSet(t *testing.T) {
for i, test := range []struct {
in string
want int64
err bool
}{
{"0", 0, false},
{"0.1k", 102, false},
{"0.1", 102, false},
{"1K", 1024, false},
{"1", 1024, false},
{"2.5", 1024 * 2.5, false},
{"1M", 1024 * 1024, false},
{"1.g", 1024 * 1024 * 1024, false},
{"10G", 10 * 1024 * 1024 * 1024, false},
{"", 0, true},
{"1p", 0, true},
{"1.p", 0, true},
{"1p", 0, true},
{"-1K", 0, true},
} {
ss := SizeSuffix(0)
err := ss.Set(test.in)
if (err != nil) != test.err {
t.Errorf("%d: Expecting error %v but got error %v", i, test.err, err)
}
got := int64(ss)
if test.want != got {
t.Errorf("%d: Want %v got %v", i, test.want, got)
}
}
}

fs/driveletter.go Normal file (12 lines)

@@ -0,0 +1,12 @@
// +build !windows
package fs
// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
//
// On non windows platforms we don't have drive letters so we always
// return false
func isDriveLetter(name string) bool {
return false
}

fs/driveletter_windows.go Normal file (13 lines)

@@ -0,0 +1,13 @@
// +build windows
package fs
// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
func isDriveLetter(name string) bool {
if len(name) != 1 {
return false
}
c := name[0]
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
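// Illustration (not part of the diff): on Windows
//   isDriveLetter("C")       == true   // so "C:\path" is parsed as a local path
//   isDriveLetter("dropbox") == false  // so "dropbox:path" is looked up as a remote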


@@ -1,19 +1,27 @@
// Generic file system interface for rclone object storage systems
package fs
import (
"fmt"
"io"
"log"
"path/filepath"
"regexp"
"time"
)
// Constants
const (
// User agent for Fs which can set it
UserAgent = "rclone/" + Version
)
// Globals
var (
// Filesystem registry
fsRegistry []*FsInfo
// Error returned by NewFs if not found in config file
NotFoundInConfigFile = fmt.Errorf("Didn't find section in config file")
)
// Filesystem info
@@ -73,9 +81,13 @@ type Fs interface {
Put(in io.Reader, remote string, modTime time.Time, size int64) (Object, error)
// Make the directory (container, bucket)
//
// Shouldn't return an error if it already exists
Mkdir() error
// Remove the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
Rmdir() error
// Precision of the ModTimes in this Fs
@@ -125,9 +137,40 @@ type Purger interface {
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
Purge() error
}
// Retry is an optional interface for errors to indicate whether the operation should be retried
//
// This should be returned from Update or Put methods as required
type Retry interface {
error
Retry() bool
}
// retryError is an error which flags itself as retryable
type retryError string
// Error interface
func (r retryError) Error() string {
return string(r)
}
// Retry interface
func (r retryError) Retry() bool {
return true
}
// Check interface
var _ Retry = retryError("")
// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...interface{}) error {
return retryError(fmt.Sprintf(format, a...))
}
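// --- Illustrative sketch (not part of this changeset) ---
// From a backend's point of view, flagging a transient failure like
// this lets Copy() retry the transfer up to its internal limit. The
// isTransient predicate is hypothetical.
//
//	if isTransient(err) {
//		return fs.RetryErrorf("upload interrupted: %v", err)
//	}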
// A channel of Objects
type ObjectsChan chan Object
@@ -153,9 +196,6 @@ type Dir struct {
// A channel of Dir objects
type DirChan chan *Dir
// Finds a FsInfo object for the name passed in
//
// Services are looked up in the config file
@@ -168,34 +208,43 @@ func Find(name string) (*FsInfo, error) {
return nil, fmt.Errorf("Didn't find filing system for %q", name)
}
// Pattern to match an rclone url
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(path string) (Fs, error) {
parts := matcher.FindStringSubmatch(path)
fsName, configName, fsPath := "local", "local", path
if parts != nil && !isDriveLetter(parts[1]) {
configName, fsPath = parts[1], parts[2]
var err error
fsName, err = ConfigFile.GetValue(configName, "type")
if err != nil {
return nil, fmt.Errorf("Didn't find section in config file for %q", configName)
return nil, NotFoundInConfigFile
}
}
fs, err := Find(fsName)
if err != nil {
return nil, err
}
// change native directory separators to / if there are any
fsPath = filepath.ToSlash(fsPath)
return fs.NewFs(configName, fsPath)
}
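// --- Illustrative sketch (not part of this changeset) ---
// Using the entry point from a standalone program; registering a
// backend with a blank import is how the test files below do it too.
package main

import (
	"log"

	"github.com/ncw/rclone/fs"
	_ "github.com/ncw/rclone/local" // register the local backend
)

func main() {
	fs.LoadConfig()
	f, err := fs.NewFs("/tmp") // a "remote:path" here would be looked up in the config file
	if err != nil {
		log.Fatal(err)
	}
	for o := range f.List() {
		log.Printf("%s (%d bytes)", o.Remote(), o.Size())
	}
}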
// Outputs log for object
func OutputLog(o interface{}, text string, args ...interface{}) {
description := ""
if o != nil {
description = fmt.Sprintf("%v: ", o)
}
out := fmt.Sprintf(text, args...)
log.Print(description + out)


@@ -4,8 +4,11 @@ package fs
import (
"fmt"
"log"
"io"
"mime"
"path"
"sync"
"time"
)
// Work out modify window for fses passed in - sets Config.ModifyWindow
@@ -51,15 +54,16 @@ func CheckMd5sums(src, dst Object) (bool, error) {
// size, mtime and MD5SUM
//
// If the src and dst size are different then it is considered to be
// not equal. If --size-only is in effect then this is the only check
// that is done.
//
// If the size is the same and the mtime is the same then it is
// considered to be equal. This check is skipped if using --checksum.
//
// If the size is the same and mtime is different, unreadable or
// --checksum is set and the MD5SUM is the same then the file is
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
@@ -68,19 +72,26 @@ func Equal(src, dst Object) bool {
Debug(src, "Sizes differ")
return false
}
if Config.SizeOnly {
Debug(src, "Sizes identical")
return true
}
var srcModTime time.Time
if !Config.CheckSum {
// Size the same so check the mtime
srcModTime = src.ModTime()
dstModTime := dst.ModTime()
dt := dstModTime.Sub(srcModTime)
ModifyWindow := Config.ModifyWindow
if dt >= ModifyWindow || dt <= -ModifyWindow {
Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
} else {
Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
return true
}
}
// mtime is unreadable or different but size is the same so
// check the MD5SUM
same, _ := CheckMd5sums(src, dst)
@@ -89,20 +100,51 @@ func Equal(src, dst Object) bool {
return false
}
if !Config.CheckSum {
// Size and MD5 the same but mtime different so update the
// mtime of the dst object here
dst.SetModTime(srcModTime)
}
Debug(src, "Size and MD5SUM of src and dst objects identical")
return true
}
// Returns a guess at the mime type from the extension
func MimeType(o Object) string {
mimeType := mime.TypeByExtension(path.Ext(o.Remote()))
if mimeType == "" {
mimeType = "application/octet-stream"
}
return mimeType
}
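// Illustration (not part of the diff): MimeType of an object named
// "photo.jpg" is "image/jpeg" (platform mime tables permitting), while
// an unknown extension falls back to "application/octet-stream".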
// Used to remove a failed copy
//
// Returns whether the file was successfully removed or not
func removeFailedCopy(dst Object) bool {
if dst == nil {
return false
}
Debug(dst, "Removing failed copy")
removeErr := dst.Remove()
if removeErr != nil {
Debug(dst, "Failed to remove failed copy: %s", removeErr)
return false
}
return true
}
// Copy src object to dst or f if nil
//
// If dst is nil then the object must not exist already. If you do
// call Copy() with dst nil on a pre-existing file then some filing
// systems (eg Drive) may duplicate the file.
func Copy(f Fs, dst, src Object) {
const maxTries = 10
tries := 0
doUpdate := dst != nil
tryAgain:
in0, err := src.Open()
if err != nil {
Stats.Error()
@@ -112,7 +154,7 @@ func Copy(f Fs, dst, src Object) {
in := NewAccount(in0) // account the transfer
var actionTaken string
if doUpdate {
actionTaken = "Copied (updated existing)"
err = dst.Update(in, src.ModTime(), src.Size())
} else {
@@ -120,22 +162,57 @@ func Copy(f Fs, dst, src Object) {
dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
}
inErr := in.Close()
// Retry if err returned a retry error
if r, ok := err.(Retry); ok && r.Retry() && tries < maxTries {
tries++
Log(src, "Received error: %v - retrying %d/%d", err, tries, maxTries)
if removeFailedCopy(dst) {
// If we removed dst, then nil it out and note we are not updating
dst = nil
doUpdate = false
}
goto tryAgain
}
if err == nil {
err = inErr
}
if err != nil {
Stats.Error()
Log(src, "Failed to copy: %s", err)
removeFailedCopy(dst)
return
}
// Verify sizes are the same after transfer
if src.Size() != dst.Size() {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
Log(dst, "%s", err)
removeFailedCopy(dst)
return
}
// Verify md5sums are the same after transfer - ignoring blank md5sums
if !Config.SizeOnly {
srcMd5sum, md5sumErr := src.Md5sum()
if md5sumErr != nil {
Stats.Error()
Log(src, "Failed to read md5sum: %s", md5sumErr)
} else if srcMd5sum != "" {
dstMd5sum, md5sumErr := dst.Md5sum()
if md5sumErr != nil {
Stats.Error()
Log(dst, "Failed to read md5sum: %s", md5sumErr)
} else if dstMd5sum != "" && srcMd5sum != dstMd5sum {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
Log(dst, "%s", err)
removeFailedCopy(dst)
return
}
}
}
Debug(src, actionTaken)
}
@@ -159,7 +236,7 @@ func checkOne(pair ObjectPair, out ObjectPairChan) {
out <- pair
}
// Read Objects on in and send them to out if they need uploading
//
// FIXME potentially doing lots of MD5SUMS at once
func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
@@ -172,7 +249,7 @@ func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
}
}
// Read Objects on in and copy them
func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
defer wg.Done()
for pair := range in {
@@ -191,12 +268,10 @@ func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
func DeleteFiles(to_be_deleted ObjectsChan) {
var wg sync.WaitGroup
wg.Add(Config.Transfers)
for i := 0; i < Config.Transfers; i++ {
go func() {
defer wg.Done()
for dst := range to_be_deleted {
if Config.DryRun {
Debug(dst, "Not deleting as --dry-run")
} else {
@@ -213,11 +288,24 @@ func DeleteFiles(to_be_deleted ObjectsChan) {
}
}()
}
Log(fs, "Waiting for deletions to finish")
Log(nil, "Waiting for deletions to finish")
wg.Wait()
}
// Read a map of Object.Remote to Object for the given Fs
func readFilesMap(fs Fs) map[string]Object {
files := make(map[string]Object)
for o := range fs.List() {
remote := o.Remote()
if _, ok := files[remote]; !ok {
files[remote] = o
} else {
Log(o, "Duplicate file detected")
}
}
return files
}
// Syncs fsrc into fdst
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc
@@ -232,10 +320,7 @@ func Sync(fdst, fsrc Fs, Delete bool) error {
// Read the destination files first
// FIXME could do this in parallel and make it use less memory
delFiles := readFilesMap(fdst)
// Read source files checking them off against dest files
to_be_checked := make(ObjectPairChan, Config.Transfers)
@@ -300,22 +385,20 @@ func Check(fdst, fsrc Fs) error {
// Read the destination files first
// FIXME could do this in parallel and make it use less memory
dstFiles := readFilesMap(fdst)
// Read the source files checking them against dstFiles
// FIXME could do this in parallel and make it use less memory
srcFiles := readFilesMap(fsrc)
// Move all the common files into commonFiles and delete them
// from srcFiles and dstFiles
commonFiles := make(map[string][]Object)
for remote, src := range srcFiles {
if dst, ok := dstFiles[remote]; ok {
commonFiles[remote] = []Object{dst, src}
delete(srcFiles, remote)
delete(dstFiles, remote)
}
}
@@ -376,10 +459,10 @@ func Check(fdst, fsrc Fs) error {
return nil
}
// List the Fs to the supplied function
//
// Lists in parallel which may get them out of order
func ListFn(f Fs, fn func(Object)) error {
in := f.List()
var wg sync.WaitGroup
wg.Add(Config.Checkers)
@@ -387,10 +470,7 @@ func List(f Fs) error {
go func() {
defer wg.Done()
for o := range in {
fn(o)
}
}()
}
@@ -398,10 +478,63 @@ func List(f Fs) error {
return nil
}
// mutex for synchronized output
var outMutex sync.Mutex
// Synchronized fmt.Fprintf
func syncFprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
outMutex.Lock()
defer outMutex.Unlock()
return fmt.Fprintf(w, format, a...)
}
// List the Fs to stdout
//
// Shows size and path
//
// Lists in parallel which may get them out of order
func List(f Fs, w io.Writer) error {
return ListFn(f, func(o Object) {
syncFprintf(w, "%9d %s\n", o.Size(), o.Remote())
})
}
// List the Fs to stdout
//
// Shows size, mod time and path
//
// Lists in parallel which may get them out of order
func ListLong(f Fs, w io.Writer) error {
return ListFn(f, func(o Object) {
Stats.Checking(o)
modTime := o.ModTime()
Stats.DoneChecking(o)
syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Format("2006-01-02 15:04:05.000000000"), o.Remote())
})
}
// List the Fs to stdout
//
// Produces the same output as the md5sum command
//
// Lists in parallel which may get them out of order
func Md5sum(f Fs, w io.Writer) error {
return ListFn(f, func(o Object) {
Stats.Checking(o)
md5sum, err := o.Md5sum()
Stats.DoneChecking(o)
if err != nil {
Debug(o, "Failed to read MD5: %v", err)
md5sum = "UNKNOWN"
}
syncFprintf(w, "%32s %s\n", md5sum, o.Remote())
})
}
// List the directories/buckets/containers in the Fs to stdout
func ListDir(f Fs, w io.Writer) error {
for dir := range f.ListDir() {
fmt.Printf("%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
syncFprintf(w, "%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
}
return nil
}
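// --- Illustrative sketch (not part of this changeset) ---
// Taken together, the ls/lsl/md5sum/lsd commands are expected to
// reduce to calls like these (the wiring shown is hypothetical):
//
//	_ = List(f, os.Stdout)     // rclone ls
//	_ = ListLong(f, os.Stdout) // rclone lsl
//	_ = Md5sum(f, os.Stdout)   // rclone md5sum
//	_ = ListDir(f, os.Stdout)  // rclone lsd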
@@ -434,16 +567,21 @@ func Rmdir(f Fs) error {
//
// FIXME doesn't delete local directories
func Purge(f Fs) error {
var err error
if purger, ok := f.(Purger); ok {
if Config.DryRun {
Debug(f, "Not purging as --dry-run set")
} else {
err = purger.Purge()
}
} else {
// DeleteFiles and Rmdir observe --dry-run
DeleteFiles(f.List())
log.Printf("Deleting path")
err = Rmdir(f)
}
if err != nil {
Stats.Error()
return err
}
return nil
}

fs/operations_test.go Normal file (442 lines)

@@ -0,0 +1,442 @@
// Test rclone by doing real transactions to a storage provider, to and
// from the local disk
package fs_test
import (
"bytes"
"flag"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest"
// Active file systems
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
)
// Globals
var (
localName, remoteName string
flocal, fremote fs.Fs
RemoteName = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
SubDir = flag.Bool("subdir", false, "Set to test with a sub directory")
Verbose = flag.Bool("verbose", false, "Set to enable logging")
finalise func()
)
// Write a file
func WriteFile(filePath, content string, t time.Time) {
// FIXME make directories?
filePath = path.Join(localName, filePath)
dirPath := path.Dir(filePath)
err := os.MkdirAll(dirPath, 0770)
if err != nil {
log.Fatalf("Failed to make directories %q: %v", dirPath, err)
}
err = ioutil.WriteFile(filePath, []byte(content), 0600)
if err != nil {
log.Fatalf("Failed to write file %q: %v", filePath, err)
}
err = os.Chtimes(filePath, t, t)
if err != nil {
log.Fatalf("Failed to chtimes file %q: %v", filePath, err)
}
}
var t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
var t2 = fstest.Time("2011-12-25T12:59:59.123456789Z")
var t3 = fstest.Time("2011-12-30T12:59:59.000000000Z")
func TestInit(t *testing.T) {
fs.LoadConfig()
fs.Config.Verbose = *Verbose
fs.Config.Quiet = !*Verbose
var err error
fremote, finalise, err = fstest.RandomRemote(*RemoteName, *SubDir)
if err != nil {
t.Fatalf("Failed to open remote %q: %v", *RemoteName, err)
}
t.Logf("Testing with remote %v", fremote)
localName, err = ioutil.TempDir("", "rclone")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
localName = filepath.ToSlash(localName)
t.Logf("Testing with local %q", localName)
flocal, err = fs.NewFs(localName)
if err != nil {
t.Fatalf("Failed to make %q: %v", remoteName, err)
}
}
func TestCalculateModifyWindow(t *testing.T) {
fs.CalculateModifyWindow(fremote, flocal)
t.Logf("ModifyWindow is %q", fs.Config.ModifyWindow)
}
func TestMkdir(t *testing.T) {
fstest.TestMkdir(t, fremote)
}
// Check dry run is working
func TestCopyWithDryRun(t *testing.T) {
WriteFile("sub dir/hello world", "hello world", t1)
fs.Config.DryRun = true
err := fs.Sync(fremote, flocal, false)
fs.Config.DryRun = false
if err != nil {
t.Fatalf("Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, []fstest.Item{}, fs.Config.ModifyWindow)
}
// Now without dry run
func TestCopy(t *testing.T) {
err := fs.Sync(fremote, flocal, false)
if err != nil {
t.Fatalf("Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestLsd(t *testing.T) {
var buf bytes.Buffer
err := fs.ListDir(fremote, &buf)
if err != nil {
t.Fatalf("ListDir failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, "sub dir\n") {
t.Fatalf("Result wrong %q", res)
}
}
// Now delete the local file and download it
func TestCopyAfterDelete(t *testing.T) {
err := os.Remove(localName + "/sub dir/hello world")
if err != nil {
t.Fatalf("Remove failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, []fstest.Item{}, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestCopyRedownload(t *testing.T) {
err := fs.Sync(flocal, fremote, false)
if err != nil {
t.Fatalf("Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
// Clean the directory
cleanTempDir(t)
}
// Create a file and sync it. Change the last modified date and resync.
// If we're only doing sync by size and checksum, we expect nothing
// to be transferred on the second sync.
func TestSyncBasedOnCheckSum(t *testing.T) {
cleanTempDir(t)
fs.Config.CheckSum = true
defer func() { fs.Config.CheckSum = false }()
WriteFile("check sum", "", t1)
local_items := []fstest.Item{
{Path: "check sum", Size: 0, ModTime: t1, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fs.Stats.ResetCounters()
err := fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Initial sync failed: %v", err)
}
// We should have transferred exactly one file.
if fs.Stats.GetTransfers() != 1 {
t.Fatalf("Sync 1: want 1 transfer, got %d", fs.Stats.GetTransfers())
}
remote_items := local_items
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)
err = os.Chtimes(localName+"/check sum", t2, t2)
if err != nil {
t.Fatalf("Chtimes failed: %v", err)
}
local_items = []fstest.Item{
{Path: "check sum", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fs.Stats.ResetCounters()
err = fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
// We should have transferred no files
if fs.Stats.GetTransfers() != 0 {
t.Fatalf("Sync 2: want 0 transfers, got %d", fs.Stats.GetTransfers())
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)
cleanTempDir(t)
}
// Create a file and sync it. Change the last modified date and the
// file contents but not the size. If we're only doing sync by size,
// we expect nothing to be transferred on the second sync.
func TestSyncSizeOnly(t *testing.T) {
cleanTempDir(t)
fs.Config.SizeOnly = true
defer func() { fs.Config.SizeOnly = false }()
WriteFile("sizeonly", "potato", t1)
local_items := []fstest.Item{
{Path: "sizeonly", Size: 6, ModTime: t1, Md5sum: "8ee2027983915ec78acc45027d874316"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fs.Stats.ResetCounters()
err := fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Initial sync failed: %v", err)
}
// We should have transferred exactly one file.
if fs.Stats.GetTransfers() != 1 {
t.Fatalf("Sync 1: want 1 transfer, got %d", fs.Stats.GetTransfers())
}
remote_items := local_items
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)
// Update mtime, md5sum but not length of file
WriteFile("sizeonly", "POTATO", t2)
local_items = []fstest.Item{
{Path: "sizeonly", Size: 6, ModTime: t2, Md5sum: "8ac6f27a282e4938125482607ccfb55f"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fs.Stats.ResetCounters()
err = fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
// We should have transferred no files
if fs.Stats.GetTransfers() != 0 {
t.Fatalf("Sync 2: want 0 transfers, got %d", fs.Stats.GetTransfers())
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)
cleanTempDir(t)
}
func TestSyncAfterChangingModtimeOnly(t *testing.T) {
WriteFile("empty space", "", t1)
err := os.Chtimes(localName+"/empty space", t2, t2)
if err != nil {
t.Fatalf("Chtimes failed: %v", err)
}
err = fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestSyncAfterAddingAFile(t *testing.T) {
WriteFile("potato", "------------------------------------------------------------", t3)
err := fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
WriteFile("potato", "smaller but same date", t3)
err := fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Sync after changing a file's contents, modtime but not length
func TestSyncAfterChangingContentsOnly(t *testing.T) {
WriteFile("potato", "SMALLER BUT SAME DATE", t2)
err := fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Sync after removing a file and adding a file --dry-run
func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
WriteFile("potato2", "------------------------------------------------------------", t1)
err := os.Remove(localName + "/potato")
if err != nil {
t.Fatalf("Remove failed: %v", err)
}
fs.Config.DryRun = true
err = fs.Sync(fremote, flocal, true)
fs.Config.DryRun = false
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
before := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, before, fs.Config.ModifyWindow)
}
// Sync after removing a file and adding a file
func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
err := fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestLs(t *testing.T) {
var buf bytes.Buffer
err := fs.List(fremote, &buf)
if err != nil {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, " 0 empty space\n") {
t.Errorf("empty space missing: %q", res)
}
if !strings.Contains(res, " 60 potato2\n") {
t.Errorf("potato2 missing: %q", res)
}
}
func TestLsLong(t *testing.T) {
var buf bytes.Buffer
err := fs.ListLong(fremote, &buf)
if err != nil {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
m1 := regexp.MustCompile(`(?m)^ 0 2011-12-25 12:59:59\.\d{9} empty space$`)
if !m1.MatchString(res) {
t.Errorf("empty space missing: %q", res)
}
m2 := regexp.MustCompile(`(?m)^ 60 2001-02-03 04:05:06\.\d{9} potato2$`)
if !m2.MatchString(res) {
t.Errorf("potato2 missing: %q", res)
}
}
func TestMd5sum(t *testing.T) {
var buf bytes.Buffer
err := fs.Md5sum(fremote, &buf)
if err != nil {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") {
t.Errorf("empty space missing: %q", res)
}
if !strings.Contains(res, "6548b156ea68a4e003e786df99eee76 potato2\n") {
t.Errorf("potato2 missing: %q", res)
}
}
func TestCheck(t *testing.T) {
}
// Clean the temporary directory
func cleanTempDir(t *testing.T) {
t.Logf("Cleaning temporary directory: %q", localName)
err := os.RemoveAll(localName)
if err != nil {
t.Logf("Failed to remove %q: %v", localName, err)
}
}
func TestFinalise(t *testing.T) {
finalise()
cleanTempDir(t)
}

fs/test_all.sh Executable file (29 lines)

@@ -0,0 +1,29 @@
#!/bin/bash
go install
REMOTES="
TestSwift:
TestS3:
TestDrive:
TestGoogleCloudStorage:
TestDropbox:
"
function test_remote {
args=$@
echo "@go test $args"
go test $args || {
echo "*** test $args FAILED ***"
exit 1
}
}
test_remote
test_remote --subdir
for remote in $REMOTES; do
test_remote --remote $remote
test_remote --remote $remote --subdir
done
echo "All OK"

fs/version.go Normal file (3 lines)

@@ -0,0 +1,3 @@
package fs
const Version = "v1.16"

fstest/fstest.go Normal file (231 lines)

@@ -0,0 +1,231 @@
// Utilities for testing the Fs
package fstest
// FIXME put name of test FS in Fs structure
import (
"io/ioutil"
"log"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/ncw/rclone/fs"
)
// Seed the random number generator
func init() {
rand.Seed(time.Now().UnixNano())
}
// Represents an item for checking
type Item struct {
Path string
Md5sum string
ModTime time.Time
Size int64
}
// check the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
dt := modTime.Sub(i.ModTime)
if dt >= precision || dt <= -precision {
t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision)
}
}
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
if obj == nil {
t.Fatalf("Object is nil")
}
// Check attributes
Md5sum, err := obj.Md5sum()
if err != nil {
t.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
}
if i.Md5sum != Md5sum {
t.Errorf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
}
if i.Size != obj.Size() {
t.Errorf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
}
i.CheckModTime(t, obj, obj.ModTime(), precision)
}
// Represents all items for checking
type Items struct {
byName map[string]*Item
items []Item
}
// Make an Items
func NewItems(items []Item) *Items {
is := &Items{
byName: make(map[string]*Item),
items: items,
}
// Fill up byName
for i := range items {
is.byName[items[i].Path] = &items[i]
}
return is
}
// Check off an item
func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
i, ok := is.byName[obj.Remote()]
if !ok {
t.Errorf("Unexpected file %q", obj.Remote())
return
}
delete(is.byName, obj.Remote())
i.Check(t, obj, precision)
}
// Check all done
func (is *Items) Done(t *testing.T) {
if len(is.byName) != 0 {
for name := range is.byName {
log.Printf("Not found %q", name)
}
t.Errorf("%d objects not found", len(is.byName))
}
}
// Checks the fs to see if it has the expected contents
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision time.Duration) {
is := NewItems(items)
for obj := range f.List() {
if obj == nil {
t.Errorf("Unexpected nil in List()")
continue
}
is.Find(t, obj, precision)
}
is.Done(t)
}
// Checks the fs to see if it has the expected contents
func CheckListing(t *testing.T, f fs.Fs, items []Item) {
precision := f.Precision()
CheckListingWithPrecision(t, f, items, precision)
}
// Parse a time string or explode
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// Create a random string
func RandomString(n int) string {
source := "abcdefghijklmnopqrstuvwxyz0123456789"
out := make([]byte, n)
for i := range out {
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// Creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) {
path, err = ioutil.TempDir("", "rclone")
if err == nil {
// Now remove the directory
err = os.Remove(path)
}
path = filepath.ToSlash(path)
return
}
// Make a random bucket or subdirectory name
//
// Returns a random remote name plus the leaf name
func RandomRemoteName(remoteName string) (string, string, error) {
var err error
var leafName string
// Make a local directory if the remote name is empty
if remoteName == "" {
remoteName, err = LocalRemote()
if err != nil {
return "", "", err
}
} else {
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
leafName = RandomString(32)
remoteName += leafName
}
return remoteName, leafName, nil
}
// Make a random bucket or subdirectory on the remote
//
// Call the finalise function returned to Purge the fs at the end (and
// the parent if necessary)
func RandomRemote(remoteName string, subdir bool) (fs.Fs, func(), error) {
var err error
var parentRemote fs.Fs
remoteName, _, err = RandomRemoteName(remoteName)
if err != nil {
return nil, nil, err
}
if subdir {
parentRemote, err = fs.NewFs(remoteName)
if err != nil {
return nil, nil, err
}
remoteName += "/" + RandomString(8)
}
remote, err := fs.NewFs(remoteName)
if err != nil {
return nil, nil, err
}
finalise := func() {
_ = fs.Purge(remote) // ignore error
if parentRemote != nil {
err = fs.Purge(parentRemote) // error is logged below
if err != nil {
log.Printf("Failed to purge %v: %v", parentRemote, err)
}
}
}
return remote, finalise, nil
}
func TestMkdir(t *testing.T, remote fs.Fs) {
err := fs.Mkdir(remote)
if err != nil {
t.Fatalf("Mkdir failed: %v", err)
}
CheckListing(t, remote, []Item{})
}
func TestPurge(t *testing.T, remote fs.Fs) {
err := fs.Purge(remote)
if err != nil {
t.Fatalf("Purge failed: %v", err)
}
CheckListing(t, remote, []Item{})
}
func TestRmdir(t *testing.T, remote fs.Fs) {
err := fs.Rmdir(remote)
if err != nil {
t.Fatalf("Rmdir failed: %v", err)
}
}

fstest/fstests/fstests.go Normal file (438 lines)

@@ -0,0 +1,438 @@
// Generic tests for testing the Fs and Object interfaces
package fstests
import (
"bytes"
"crypto/md5"
"encoding/hex"
"io"
"log"
"os"
"strings"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest"
)
var (
remote fs.Fs
RemoteName = ""
subRemoteName = ""
subRemoteLeaf = ""
NilObject fs.Object
file1 = fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: "file name.txt",
}
file2 = fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`,
}
)
func TestInit(t *testing.T) {
var err error
fs.LoadConfig()
fs.Config.Verbose = false
fs.Config.Quiet = true
if RemoteName == "" {
RemoteName, err = fstest.LocalRemote()
if err != nil {
log.Fatalf("Failed to create tmp dir: %v", err)
}
}
subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(RemoteName)
if err != nil {
t.Fatalf("Couldn't make remote name: %v", err)
}
remote, err = fs.NewFs(subRemoteName)
if err == fs.NotFoundInConfigFile {
log.Printf("Didn't find %q in config file - skipping tests", RemoteName)
return
}
if err != nil {
t.Fatalf("Couldn't start FS: %v", err)
}
fstest.TestMkdir(t, remote)
}
func skipIfNotOk(t *testing.T) {
if remote == nil {
t.Skip("FS not configured")
}
}
// String returns a description of the FS
func TestFsString(t *testing.T) {
skipIfNotOk(t)
str := remote.String()
if str == "" {
t.Fatal("Bad fs.String()")
}
}
type TestFile struct {
ModTime time.Time
Path string
Size int64
Md5sum string
}
func TestFsRmdirEmpty(t *testing.T) {
skipIfNotOk(t)
fstest.TestRmdir(t, remote)
}
func TestFsRmdirNotFound(t *testing.T) {
skipIfNotOk(t)
err := remote.Rmdir()
if err == nil {
t.Fatalf("Expecting error on Rmdir non existent")
}
}
func TestFsMkdir(t *testing.T) {
skipIfNotOk(t)
fstest.TestMkdir(t, remote)
fstest.TestMkdir(t, remote)
}
func TestFsListEmpty(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{})
}
func TestFsListDirEmpty(t *testing.T) {
skipIfNotOk(t)
for obj := range remote.ListDir() {
t.Errorf("Found unexpected item %q", obj.Name)
}
}
func TestFsNewFsObjectNotFound(t *testing.T) {
skipIfNotOk(t)
if remote.NewFsObject("potato") != nil {
t.Fatal("Didn't expect to find object")
}
}
func findObject(t *testing.T, Name string) fs.Object {
obj := remote.NewFsObject(Name)
if obj == nil {
t.Fatalf("Object not found: %q", Name)
}
return obj
}
func testPut(t *testing.T, file *fstest.Item) {
buf := bytes.NewBufferString(fstest.RandomString(100))
hash := md5.New()
in := io.TeeReader(buf, hash)
file.Size = int64(buf.Len())
obj, err := remote.Put(in, file.Path, file.ModTime, file.Size)
if err != nil {
t.Fatal("Put error", err)
}
file.Md5sum = hex.EncodeToString(hash.Sum(nil))
file.Check(t, obj, remote.Precision())
// Re-read the object and check again
obj = findObject(t, file.Path)
file.Check(t, obj, remote.Precision())
}
func TestFsPutFile1(t *testing.T) {
skipIfNotOk(t)
testPut(t, &file1)
}
func TestFsPutFile2(t *testing.T) {
skipIfNotOk(t)
testPut(t, &file2)
}
func TestFsListDirFile2(t *testing.T) {
skipIfNotOk(t)
found := false
for obj := range remote.ListDir() {
if obj.Name != `hello? sausage` {
t.Errorf("Found unexpected item %q", obj.Name)
} else {
found = true
}
}
if !found {
t.Errorf("Didn't find %q", `hello? sausage`)
}
}
func TestFsListDirRoot(t *testing.T) {
skipIfNotOk(t)
rootRemote, err := fs.NewFs(RemoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", RemoteName, err)
}
found := false
for obj := range rootRemote.ListDir() {
if obj.Name == subRemoteLeaf {
found = true
}
}
if !found {
t.Errorf("Didn't find %q", subRemoteLeaf)
}
}
func TestFsListRoot(t *testing.T) {
skipIfNotOk(t)
rootRemote, err := fs.NewFs(RemoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", RemoteName, err)
}
// Should either find file1 and file2 or nothing
found1 := false
file1 := subRemoteLeaf + "/" + file1.Path
found2 := false
file2 := subRemoteLeaf + "/" + file2.Path
count := 0
errors := fs.Stats.GetErrors()
for obj := range rootRemote.List() {
count++
if obj.Remote() == file1 {
found1 = true
}
if obj.Remote() == file2 {
found2 = true
}
}
errors -= fs.Stats.GetErrors()
if count == 0 {
if errors == 0 {
t.Error("Expecting error if count==0")
}
return
}
if found1 && found2 {
if errors != 0 {
t.Error("Not expecting error if found")
}
return
}
t.Errorf("Didn't find %q (%v) and %q (%v) or no files (count %d)", file1, found1, file2, found2, count)
}
func TestFsListFile1(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}
func TestFsNewFsObject(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
file1.Check(t, obj, remote.Precision())
}
func TestFsListFile1and2(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}
func TestFsRmdirFull(t *testing.T) {
skipIfNotOk(t)
err := remote.Rmdir()
if err == nil {
t.Fatalf("Expecting error on RMdir on non empty remote")
}
}
func TestFsPrecision(t *testing.T) {
skipIfNotOk(t)
precision := remote.Precision()
if precision > time.Second || precision < 0 {
t.Fatalf("Precision out of range %v", precision)
}
// FIXME check expected precision
}
func TestObjectString(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
s := obj.String()
if s != file1.Path {
t.Errorf("String() wrong %v != %v", s, file1.Path)
}
obj = NilObject
s = obj.String()
if s != "<nil>" {
t.Errorf("String() wrong %v != %v", s, "<nil>")
}
}
func TestObjectFs(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if obj.Fs() != remote {
t.Errorf("Fs is wrong %v != %v", obj.Fs(), remote)
}
}
func TestObjectRemote(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if obj.Remote() != file1.Path {
t.Errorf("Remote is wrong %v != %v", obj.Remote(), file1.Path)
}
}
func TestObjectMd5sum(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
Md5sum, err := obj.Md5sum()
if err != nil {
t.Errorf("Error in Md5sum: %v", err)
}
if Md5sum != file1.Md5sum {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}
func TestObjectModTime(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
}
func TestObjectSetModTime(t *testing.T) {
skipIfNotOk(t)
newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z")
obj := findObject(t, file1.Path)
obj.SetModTime(newModTime)
file1.ModTime = newModTime
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
// And make a new object and read it from there too
TestObjectModTime(t)
}
func TestObjectSize(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if obj.Size() != file1.Size {
t.Errorf("Size is wrong %v != %v", obj.Size(), file1.Size)
}
}
func TestObjectOpen(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
in, err := obj.Open()
if err != nil {
t.Fatalf("Open() return error: %v", err)
}
hash := md5.New()
n, err := io.Copy(hash, in)
if err != nil {
t.Fatalf("io.Copy() return error: %v", err)
}
if n != file1.Size {
t.Fatalf("Read wrong number of bytes %d != %d", n, file1.Size)
}
err = in.Close()
if err != nil {
t.Fatalf("in.Close() return error: %v", err)
}
Md5sum := hex.EncodeToString(hash.Sum(nil))
if Md5sum != file1.Md5sum {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}
func TestObjectUpdate(t *testing.T) {
skipIfNotOk(t)
buf := bytes.NewBufferString(fstest.RandomString(200))
hash := md5.New()
in := io.TeeReader(buf, hash)
file1.Size = int64(buf.Len())
obj := findObject(t, file1.Path)
err := obj.Update(in, file1.ModTime, file1.Size)
if err != nil {
t.Fatal("Update error", err)
}
file1.Md5sum = hex.EncodeToString(hash.Sum(nil))
file1.Check(t, obj, remote.Precision())
// Re-read the object and check again
obj = findObject(t, file1.Path)
file1.Check(t, obj, remote.Precision())
}
func TestObjectStorable(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if !obj.Storable() {
t.Fatalf("Expecting %v to be storable", obj)
}
}
func TestLimitedFs(t *testing.T) {
skipIfNotOk(t)
remoteName := subRemoteName + "/" + file2.Path
file2Copy := file2
file2Copy.Path = "z.txt"
fileRemote, err := fs.NewFs(remoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", remoteName, err)
}
fstest.CheckListing(t, fileRemote, []fstest.Item{file2Copy})
_, ok := fileRemote.(*fs.Limited)
if !ok {
t.Errorf("%v is not a fs.Limited", fileRemote)
}
}
func TestLimitedFsNotFound(t *testing.T) {
skipIfNotOk(t)
remoteName := subRemoteName + "/not found.txt"
fileRemote, err := fs.NewFs(remoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", remoteName, err)
}
fstest.CheckListing(t, fileRemote, []fstest.Item{})
_, ok := fileRemote.(*fs.Limited)
if ok {
t.Errorf("%v is is a fs.Limited", fileRemote)
}
}
func TestObjectRemove(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
err := obj.Remove()
if err != nil {
t.Fatal("Remove error", err)
}
fstest.CheckListing(t, remote, []fstest.Item{file2})
}
func TestObjectPurge(t *testing.T) {
skipIfNotOk(t)
fstest.TestPurge(t, remote)
err := fs.Purge(remote)
if err == nil {
t.Fatal("Expecting error after on second purge")
}
}
func TestFinalise(t *testing.T) {
skipIfNotOk(t)
if strings.HasPrefix(RemoteName, "/") {
// Remove temp directory
err := os.Remove(RemoteName)
if err != nil {
log.Printf("Failed to remove %q: %v\n", RemoteName, err)
}
}
}

fstest/fstests/gen_tests.go Normal file (143 lines)

@@ -0,0 +1,143 @@
// +build ignore
// Make the test files from fstests.go
package main
import (
"bufio"
"html/template"
"log"
"os"
"os/exec"
"regexp"
"strings"
)
// Search fstests.go and return all the test function names
func findTestFunctions() []string {
fns := []string{}
matcher := regexp.MustCompile(`^func\s+(Test.*?)\(`)
in, err := os.Open("fstests.go")
if err != nil {
log.Fatalf("Couldn't open fstests.go: %v", err)
}
defer in.Close()
scanner := bufio.NewScanner(in)
for scanner.Scan() {
line := scanner.Text()
matches := matcher.FindStringSubmatch(line)
if len(matches) > 0 {
fns = append(fns, matches[1])
}
}
if err := scanner.Err(); err != nil {
log.Fatalf("Error scanning file: %v", err)
}
return fns
}
// Data to substitute
type Data struct {
Regenerate string
FsName string
UpperFsName string
TestName string
ObjectName string
Fns []string
}
var testProgram = `
// Test {{ .UpperFsName }} filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: {{ .Regenerate }}
package {{ .FsName }}_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/{{ .FsName }}"
)
func init() {
fstests.NilObject = fs.Object((*{{ .FsName }}.FsObject{{ .ObjectName }})(nil))
fstests.RemoteName = "{{ .TestName }}"
}
// Generic tests for the Fs
{{ range $fn := .Fns }}func {{ $fn }}(t *testing.T){ fstests.{{ $fn }}(t) }
{{ end }}
`
// Generate test file piping it through gofmt
func generateTestProgram(t *template.Template, fns []string, Fsname string) {
fsname := strings.ToLower(Fsname)
TestName := "Test" + Fsname + ":"
outfile := "../../" + fsname + "/" + fsname + "_test.go"
// Find last capitalised group to be object name
matcher := regexp.MustCompile(`([A-Z][a-z0-9]+)$`)
matches := matcher.FindStringSubmatch(Fsname)
if len(matches) == 0 {
log.Fatalf("Couldn't find object name in %q", Fsname)
}
ObjectName := matches[1]
if fsname == "local" {
TestName = ""
}
data := Data{
Regenerate: "go run gen_tests.go or make gen_tests",
FsName: fsname,
UpperFsName: Fsname,
TestName: TestName,
ObjectName: ObjectName,
Fns: fns,
}
cmd := exec.Command("gofmt")
log.Printf("Writing %q", outfile)
out, err := os.Create(outfile)
if err != nil {
log.Fatal(err)
}
cmd.Stdout = out
gofmt, err := cmd.StdinPipe()
if err != nil {
log.Fatal(err)
}
if err = cmd.Start(); err != nil {
log.Fatal(err)
}
if err = t.Execute(gofmt, data); err != nil {
log.Fatal(err)
}
if err = gofmt.Close(); err != nil {
log.Fatal(err)
}
if err = cmd.Wait(); err != nil {
log.Fatal(err)
}
if err = out.Close(); err != nil {
log.Fatal(err)
}
}
func main() {
fns := findTestFunctions()
t := template.Must(template.New("main").Parse(testProgram))
generateTestProgram(t, fns, "Local")
generateTestProgram(t, fns, "Swift")
generateTestProgram(t, fns, "S3")
generateTestProgram(t, fns, "Drive")
generateTestProgram(t, fns, "GoogleCloudStorage")
generateTestProgram(t, fns, "Dropbox")
log.Printf("Done")
}

googleauth/googleauth.go Normal file (137 lines)

@@ -0,0 +1,137 @@
// Common authentication between Google Drive and Google Cloud Storage
package googleauth
import (
"encoding/json"
"fmt"
"log"
"code.google.com/p/goauth2/oauth"
"github.com/ncw/rclone/fs"
)
// TokenCache is a cache which saves the token in the config file section named by its string value
type TokenCache string
// Get the token from the config file - returns an error if it isn't present
func (name TokenCache) Token() (*oauth.Token, error) {
tokenString, err := fs.ConfigFile.GetValue(string(name), "token")
if err != nil {
return nil, err
}
if tokenString == "" {
return nil, fmt.Errorf("Empty token found - please reconfigure")
}
token := new(oauth.Token)
err = json.Unmarshal([]byte(tokenString), token)
if err != nil {
return nil, err
}
return token, nil
}
// Save the token to the config file
//
// This saves the config file if it changes
func (name TokenCache) PutToken(token *oauth.Token) error {
tokenBytes, err := json.Marshal(token)
if err != nil {
return err
}
tokenString := string(tokenBytes)
old := fs.ConfigFile.MustValue(string(name), "token")
if tokenString != old {
fs.ConfigFile.SetValue(string(name), "token", tokenString)
fs.SaveConfig()
}
return nil
}
// Auth contains information to authenticate an app against google services
type Auth struct {
Scope string
DefaultClientId string
DefaultClientSecret string
}
// Makes a new transport using authorisation from the config
//
// Doesn't have a token yet
func (auth *Auth) newTransport(name string) (*oauth.Transport, error) {
clientId := fs.ConfigFile.MustValue(name, "client_id")
if clientId == "" {
clientId = auth.DefaultClientId
}
clientSecret := fs.ConfigFile.MustValue(name, "client_secret")
if clientSecret == "" {
clientSecret = auth.DefaultClientSecret
}
// Settings for authorization.
var config = &oauth.Config{
ClientId: clientId,
ClientSecret: clientSecret,
Scope: auth.Scope,
RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
AuthURL: "https://accounts.google.com/o/oauth2/auth",
TokenURL: "https://accounts.google.com/o/oauth2/token",
TokenCache: TokenCache(name),
}
t := &oauth.Transport{
Config: config,
Transport: fs.Config.Transport(),
}
return t, nil
}
// Makes a new transport using authorisation from the config with token
func (auth *Auth) NewTransport(name string) (*oauth.Transport, error) {
t, err := auth.newTransport(name)
if err != nil {
return nil, err
}
// Try to pull the token from the cache; if this fails, we need to get one.
token, err := t.Config.TokenCache.Token()
if err != nil {
return nil, fmt.Errorf("Failed to get token: %s", err)
}
t.Token = token
return t, nil
}
// Configuration helper - called after the user has put in the defaults
func (auth *Auth) Config(name string) {
// See if already have a token
tokenString := fs.ConfigFile.MustValue(name, "token")
if tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !fs.Confirm() {
return
}
}
// Get a transport
t, err := auth.newTransport(name)
if err != nil {
log.Fatalf("Couldn't make transport: %v", err)
}
// Generate a URL for the user to visit for authorization.
authUrl := t.Config.AuthCodeURL("state")
fmt.Printf("Go to the following link in your browser\n")
fmt.Printf("%s\n", authUrl)
fmt.Printf("Log in, then type paste the token that is returned in the browser here\n")
// Read the code, and exchange it for a token.
fmt.Printf("Enter verification code> ")
authCode := fs.ReadLine()
_, err = t.Exchange(authCode)
if err != nil {
log.Fatalf("Failed to get token: %v", err)
}
}
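// A minimal usage sketch (hypothetical backend, names invented): a
// backend declares its Auth once, runs Config from its fs.Register
// Config callback at setup time, and calls NewTransport at run time.
//
//	var myAuth = &googleauth.Auth{
//		Scope:               "https://www.googleapis.com/auth/devstorage.read_write",
//		DefaultClientId:     "my-client-id",
//		DefaultClientSecret: "my-client-secret",
//	}
//
//	myAuth.Config("myremote")                  // during rclone config
//	t, err := myAuth.NewTransport("myremote")  // at run time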


@@ -0,0 +1,574 @@
// Google Cloud Storage interface
package googlecloudstorage
/*
Notes
Can't set Updated but can set Metadata on object creation
Patch needs full_control not just read_write
FIXME Patch/Delete/Get isn't working with files with spaces in their names - gives a 404 error
- https://code.google.com/p/google-api-go-client/issues/detail?id=64
*/
import (
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"path"
"regexp"
"strings"
"time"
"google.golang.org/api/googleapi"
"google.golang.org/api/storage/v1"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/googleauth"
)
const (
rcloneClientId = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "X4Z3ca8xfWDb1Voo-F9a7ZxJ"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 256 // chunk size to read directory listings
)
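// For example (a sketch using only the time package):
// time.Date(2015, 6, 9, 18, 0, 33, 5, time.UTC).Format(timeFormatOut)
// gives "2015-06-09T18:00:33.000000005Z". Unlike time.RFC3339Nano,
// which trims trailing zeros, ".000000000" always emits nine digits,
// so the stored mtimes keep a fixed width.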
var (
// Description of how to auth for this app
storageAuth = &googleauth.Auth{
Scope: storage.DevstorageFullControlScope,
DefaultClientId: rcloneClientId,
DefaultClientSecret: rcloneClientSecret,
}
)
// Register with Fs
func init() {
fs.Register(&fs.FsInfo{
Name: "google cloud storage",
NewFs: NewFs,
Config: func(name string) {
storageAuth.Config(name)
},
Options: []fs.Option{{
Name: "client_id",
Help: "Google Application Client Id - leave blank to use rclone's.",
}, {
Name: "client_secret",
Help: "Google Application Client Secret - leave blank to use rclone's.",
}, {
Name: "project_number",
Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
}, {
Value: "bucketOwnerFullControl",
Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
}, {
Value: "bucketOwnerRead",
Help: "Object owner gets OWNER access, and project team owners get READER access.",
}, {
Value: "private",
Help: "Object owner gets OWNER access [default if left blank].",
}, {
Value: "projectPrivate",
Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Object owner gets OWNER access, and all Users get READER access.",
}},
}, {
Name: "bucket_acl",
Help: "Access Control List for new buckets.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
}, {
Value: "private",
Help: "Project team owners get OWNER access [default if left blank].",
}, {
Value: "projectPrivate",
Help: "Project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Project team owners get OWNER access, and all Users get READER access.",
}, {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
}},
}},
})
}
// FsStorage represents a remote storage server
type FsStorage struct {
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
root string // the path we are working on if any
projectNumber string // used for finding buckets
objectAcl string // used when creating new objects
bucketAcl string // used when creating new buckets
}
// FsObjectStorage describes a storage object
//
// Will definitely have info but maybe not meta
type FsObjectStorage struct {
storage *FsStorage // what this object is part of
remote string // The remote path
url string // download path
md5sum string // The MD5Sum of the object
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
}
// ------------------------------------------------------------
// String converts this FsStorage to a string
func (f *FsStorage) String() string {
if f.root == "" {
return fmt.Sprintf("Storage bucket %s", f.bucket)
}
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
}
// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a storage 'url'
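//
// For example (illustrative):
//
//	parsePath("bucket/path/to/dir") // -> "bucket", "path/to/dir", nil
//	parsePath("bucket")             // -> "bucket", "", nil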
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = fmt.Errorf("Couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
// NewFs constructs an FsStorage from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
t, err := storageAuth.NewTransport(name)
if err != nil {
return nil, err
}
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
f := &FsStorage{
bucket: bucket,
root: directory,
projectNumber: fs.ConfigFile.MustValue(name, "project_number"),
objectAcl: fs.ConfigFile.MustValue(name, "object_acl"),
bucketAcl: fs.ConfigFile.MustValue(name, "bucket_acl"),
}
if f.objectAcl == "" {
f.objectAcl = "private"
}
if f.bucketAcl == "" {
f.bucketAcl = "private"
}
// Create a new authorized Storage client.
f.client = t.Client()
f.svc, err = storage.New(f.client)
if err != nil {
return nil, fmt.Errorf("Couldn't create Google Cloud Storage client: %s", err)
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
_, err = f.svc.Objects.Get(bucket, directory).Do()
if err == nil {
remote := path.Base(directory)
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
}
}
return f, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsStorage) newFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
o := &FsObjectStorage{
storage: f,
remote: remote,
}
if info != nil {
o.setMetaData(info)
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
// logged already FsDebug("Failed to read info: %s", err)
return nil
}
}
return o
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsStorage) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
list := f.svc.Objects.List(f.bucket).Prefix(f.root).MaxResults(listChunks)
if directories {
list = list.Delimiter("/")
}
rootLength := len(f.root)
for {
objects, err := list.Do()
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
return
}
if !directories {
for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, f.root) {
fs.Log(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
fn(remote, object)
}
} else {
var object storage.Object
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
continue
}
fn(prefix[:len(prefix)-1], &object)
}
}
if objects.NextPageToken == "" {
break
}
list.PageToken(objects.NextPageToken)
}
}
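// Note that list streams listings of any size: each Do() above
// fetches one page of up to listChunks objects and PageToken advances
// the call to the next page.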
// Walk the path returning a channel of FsObjects
func (f *FsStorage) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
if f.bucket == "" {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
} else {
// List the objects
go func() {
defer close(out)
f.list(false, func(remote string, object *storage.Object) {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
}()
}
return out
}
// Lists the buckets
func (f *FsStorage) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
if f.bucket == "" {
// List the buckets
go func() {
defer close(out)
if f.projectNumber == "" {
fs.Stats.Error()
fs.Log(f, "Can't list buckets without project number")
return
}
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
for {
buckets, err := listBuckets.Do()
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list buckets: %v", err)
break
} else {
for _, bucket := range buckets.Items {
out <- &fs.Dir{
Name: bucket.Name,
Bytes: 0,
Count: 0,
}
}
}
if buckets.NextPageToken == "" {
break
}
listBuckets.PageToken(buckets.NextPageToken)
}
}()
} else {
// List the directories in the path in the bucket
go func() {
defer close(out)
f.list(true, func(remote string, object *storage.Object) {
out <- &fs.Dir{
Name: remote,
Bytes: int64(object.Size),
Count: 0,
}
})
}()
}
return out
}
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsStorage) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
o := &FsObjectStorage{storage: f, remote: remote}
return o, o.Update(in, modTime, size)
}
// Mkdir creates the bucket if it doesn't exist
func (f *FsStorage) Mkdir() error {
_, err := f.svc.Buckets.Get(f.bucket).Do()
if err == nil {
// Bucket already exists
return nil
}
if f.projectNumber == "" {
return fmt.Errorf("Can't make bucket without project number")
}
bucket := storage.Bucket{
Name: f.bucket,
}
_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketAcl).Do()
return err
}
// Rmdir deletes the bucket
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *FsStorage) Rmdir() error {
return f.svc.Buckets.Delete(f.bucket).Do()
}
// Return the precision
func (f *FsStorage) Precision() time.Duration {
return time.Nanosecond
}
// ------------------------------------------------------------
// Return the parent Fs
func (o *FsObjectStorage) Fs() fs.Fs {
return o.storage
}
// Return a string version
func (o *FsObjectStorage) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Return the remote path
func (o *FsObjectStorage) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectStorage) Md5sum() (string, error) {
return o.md5sum, nil
}
// Size returns the size of an object in bytes
func (o *FsObjectStorage) Size() int64 {
return o.bytes
}
// setMetaData sets the fs data from a storage.Object
func (o *FsObjectStorage) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)
// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
if err != nil {
fs.Log(o, "Bad MD5 decode: %v", err)
} else {
o.md5sum = hex.EncodeToString(md5sumData)
}
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(timeFormatIn, mtimeString)
if err == nil {
o.modTime = modTime
return
} else {
fs.Debug(o, "Failed to read mtime from metadata: %s", err)
}
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormatIn, info.Updated)
if err != nil {
fs.Log(o, "Bad time decode: %v", err)
} else {
o.modTime = modTime
}
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *FsObjectStorage) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
object, err := o.storage.svc.Objects.Get(o.storage.bucket, o.storage.root+o.remote).Do()
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
}
o.setMetaData(object)
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime from the metadata; if that
// isn't present it falls back to the object's Updated time
func (o *FsObjectStorage) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
// fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
return o.modTime
}
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormatOut)
return metadata
}
// Sets the modification time of the local fs object
func (o *FsObjectStorage) SetModTime(modTime time.Time) {
// This only adds metadata so will preserve other metadata
object := storage.Object{
Bucket: o.storage.bucket,
Name: o.storage.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
// newObject is nil on error so don't use it
return
}
o.setMetaData(newObject)
}
// Is this object storable
func (o *FsObjectStorage) Storable() bool {
return true
}
// Open an object for read
func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
// This is slightly complicated by Go here insisting on
// decoding the %2F in URLs into / which is legal in http, but
// unfortunately not what the storage server wants.
//
// So first encode all the % into their encoded form; URL
// decoding will then give back our original escaped string
url := strings.Replace(o.url, "%", "%25", -1)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
// SetOpaque sets Opaque such that HTTP requests to it don't
// alter any hex-escaped characters
googleapi.SetOpaque(req.URL)
req.Header.Set("User-Agent", fs.UserAgent)
res, err := o.storage.client.Do(req)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
_ = res.Body.Close() // ignore error
return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
}
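// A minimal sketch of the problem worked around above (standard
// library only): url.Parse decodes the path, so the escaping is lost
// unless the URL is made opaque.
//
//	u, _ := url.Parse("https://example.com/bucket/a%2Fb")
//	fmt.Println(u.Path) // "/bucket/a/b" - the %2F has become a /
//
// Marking the URL opaque (which googleapi.SetOpaque does) keeps the
// hex-escaped form on the wire.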
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectStorage) Update(in io.Reader, modTime time.Time, size int64) error {
object := storage.Object{
Bucket: o.storage.bucket,
Name: o.storage.root + o.remote,
ContentType: fs.MimeType(o),
Size: uint64(size),
Updated: modTime.Format(timeFormatOut), // Doesn't get set
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.storage.svc.Objects.Insert(o.storage.bucket, &object).Media(in).Name(object.Name).PredefinedAcl(o.storage.objectAcl).Do()
if err != nil {
return err
}
// Set the metadata for the new object while we have it
o.setMetaData(newObject)
return nil
}
// Remove an object
func (o *FsObjectStorage) Remove() error {
return o.storage.svc.Objects.Delete(o.storage.bucket, o.storage.root+o.remote).Do()
}
// Check the interfaces are satisfied
var _ fs.Fs = &FsStorage{}
var _ fs.Object = &FsObjectStorage{}


@@ -0,0 +1,53 @@
// Test GoogleCloudStorage filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
package googlecloudstorage_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/googlecloudstorage"
)
func init() {
fstests.NilObject = fs.Object((*googlecloudstorage.FsObjectStorage)(nil))
fstests.RemoteName = "TestGoogleCloudStorage:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

graphics/rclone-256x256.png (new binary file, 51 KiB - not shown)

graphics/rclone-64x64.png (new binary file, 7.7 KiB - not shown)


@@ -1,17 +1,24 @@
// Local filesystem interface
package local
// Note that all rclone paths should be / separated. Anything coming
// from the filepath module will have \ separators on windows so
// should be converted using filepath.ToSlash. Windows is quite happy
// with / separators so there is no need to convert them back.
import (
"crypto/md5"
"encoding/hex"
"fmt"
"hash"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"sync"
"time"
"unicode/utf8"
"github.com/ncw/rclone/fs"
)
@@ -26,25 +33,30 @@ func init() {
// FsLocal represents a local filesystem rooted at root
type FsLocal struct {
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warned map[string]struct{} // whether we have warned about this string
}
// FsObjectLocal represents a local filesystem object
type FsObjectLocal struct {
local fs.Fs // The Fs this object is part of
local *FsLocal // The Fs this object is part of
remote string // The remote path
path string // The local path
info os.FileInfo // Interface for file info
info os.FileInfo // Interface for file info (always present)
md5sum string // the md5sum of the object or "" if not calculated
}
// ------------------------------------------------------------
// NewFs contstructs an FsLocal from the path
func NewFs(name, root string) (fs.Fs, error) {
root = path.Clean(root)
f := &FsLocal{root: root}
root = filepath.ToSlash(path.Clean(root))
f := &FsLocal{
root: root,
warned: make(map[string]struct{}),
}
// Check to see if this points to a file
fi, err := os.Lstat(f.root)
if err == nil && fi.Mode().IsRegular() {
@@ -66,8 +78,9 @@ func (f *FsLocal) String() string {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsLocal) NewFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
path := filepath.Join(f.root, remote)
func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
remote = filepath.ToSlash(remote)
path := path.Join(f.root, remote)
o := &FsObjectLocal{local: f, remote: remote, path: path}
if info != nil {
o.info = info
@@ -85,7 +98,7 @@ func (f *FsLocal) NewFsObjectWithInfo(remote string, info os.FileInfo) fs.Object
//
// May return nil if an error occurred
func (f *FsLocal) NewFsObject(remote string) fs.Object {
return f.NewFsObjectWithInfo(remote, nil)
return f.newFsObjectWithInfo(remote, nil)
}
// List the path returning a channel of FsObjects
@@ -97,19 +110,19 @@ func (f *FsLocal) List() fs.ObjectsChan {
err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
log.Printf("Failed to open directory: %s: %s", path, err)
fs.Log(f, "Failed to open directory: %s: %s", path, err)
} else {
remote, err := filepath.Rel(f.root, path)
if err != nil {
fs.Stats.Error()
log.Printf("Failed to get relative path %s: %s", path, err)
fs.Log(f, "Failed to get relative path %s: %s", path, err)
return nil
}
if remote == "." {
return nil
// remote = ""
}
if fs := f.NewFsObjectWithInfo(remote, fi); fs != nil {
if fs := f.newFsObjectWithInfo(remote, fi); fs != nil {
if fs.Storable() {
out <- fs
}
@@ -119,13 +132,27 @@ func (f *FsLocal) List() fs.ObjectsChan {
})
if err != nil {
fs.Stats.Error()
log.Printf("Failed to open directory: %s: %s", f.root, err)
fs.Log(f, "Failed to open directory: %s: %s", f.root, err)
}
close(out)
}()
return out
}
// cleanUtf8 makes a string into a valid UTF-8 string
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
func (f *FsLocal) cleanUtf8(name string) string {
if utf8.ValidString(name) {
return name
}
if _, ok := f.warned[name]; !ok {
fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
return string([]rune(name))
}
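// For example (a sketch): string([]rune("b\xc3(ad")) == "b\ufffd(ad" -
// the conversion to []rune maps each invalid byte to utf8.RuneError
// (U+FFFD) and the conversion back to string keeps the replacement.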
// Walk the path returning a channel of FsObjects
func (f *FsLocal) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
@@ -134,12 +161,12 @@ func (f *FsLocal) ListDir() fs.DirChan {
items, err := ioutil.ReadDir(f.root)
if err != nil {
fs.Stats.Error()
log.Printf("Couldn't find read directory: %s", err)
fs.Log(f, "Couldn't find read directory: %s", err)
} else {
for _, item := range items {
if item.IsDir() {
dir := &fs.Dir{
Name: item.Name(),
Name: f.cleanUtf8(item.Name()),
When: item.ModTime(),
Bytes: 0,
Count: 0,
@@ -149,7 +176,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
err := filepath.Walk(dirpath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
log.Printf("Failed to open directory: %s: %s", path, err)
fs.Log(f, "Failed to open directory: %s: %s", path, err)
} else {
dir.Count += 1
dir.Bytes += fi.Size()
@@ -158,7 +185,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
})
if err != nil {
fs.Stats.Error()
log.Printf("Failed to open directory: %s: %s", dirpath, err)
fs.Log(f, "Failed to open directory: %s: %s", dirpath, err)
}
out <- dir
}
@@ -171,10 +198,14 @@ func (f *FsLocal) ListDir() fs.DirChan {
// Puts the FsObject to the local filesystem
func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
dstPath := filepath.Join(f.root, remote)
// Temporary FsObject under construction
fs := &FsObjectLocal{local: f, remote: remote, path: dstPath}
return fs, fs.Update(in, modTime, size)
dstPath := path.Join(f.root, remote)
// Temporary FsObject under construction - info filled in by Update()
o := &FsObjectLocal{local: f, remote: remote, path: dstPath}
err := o.Update(in, modTime, size)
if err != nil {
return nil, err
}
return o, nil
}
// Mkdir creates the directory if it doesn't exist
@@ -211,12 +242,15 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
}
path := fd.Name()
// fmt.Println("Created temp file", path)
fd.Close()
err = fd.Close()
if err != nil {
return time.Second
}
// Delete it on return
defer func() {
// fmt.Println("Remove temp file")
os.Remove(path)
_ = os.Remove(path) // ignore error
}()
// Find the minimum duration we can detect
@@ -246,6 +280,22 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
return
}
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsLocal) Purge() error {
fi, err := os.Lstat(f.root)
if err != nil {
return err
}
if !fi.Mode().IsDir() {
return fmt.Errorf("Can't Purge non directory: %q", f.root)
}
return os.RemoveAll(f.root)
}
// ------------------------------------------------------------
// Return the parent Fs
@@ -263,26 +313,35 @@ func (o *FsObjectLocal) String() string {
// Return the remote path
func (o *FsObjectLocal) Remote() string {
return o.remote
return o.local.cleanUtf8(o.remote)
}
// Md5sum calculates the Md5sum of a file returning a lowercase hex string
func (o *FsObjectLocal) Md5sum() (string, error) {
if o.md5sum != "" {
return o.md5sum, nil
}
in, err := os.Open(o.path)
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to open: %s", err)
return "", err
}
defer in.Close() // FIXME ignoring error
hash := md5.New()
_, err = io.Copy(hash, in)
closeErr := in.Close()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read: %s", err)
return "", err
}
return fmt.Sprintf("%x", hash.Sum(nil)), nil
if closeErr != nil {
fs.Stats.Error()
fs.Log(o, "Failed to close: %s", closeErr)
return "", closeErr
}
o.md5sum = hex.EncodeToString(hash.Sum(nil))
return o.md5sum, nil
}
// Size returns the size of an object in bytes
@@ -300,6 +359,13 @@ func (o *FsObjectLocal) SetModTime(modTime time.Time) {
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
fs.Debug(o, "Failed to set mtime on file: %s", err)
return
}
// Re-read metadata
err = o.lstat()
if err != nil {
fs.Debug(o, "Failed to stat: %s", err)
return
}
}
@@ -310,15 +376,53 @@ func (o *FsObjectLocal) Storable() bool {
fs.Debug(o, "Can't transfer non file/directory")
return false
} else if mode&os.ModeDir != 0 {
fs.Debug(o, "FIXME Skipping directory")
// fs.Debug(o, "Skipping directory")
return false
}
return true
}
// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
// object that is read
type localOpenFile struct {
o *FsObjectLocal // object that is open
in io.ReadCloser // handle we are wrapping
hash hash.Hash // currently accumulating MD5
}
// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
n, err = file.in.Read(p)
if n > 0 {
// Hash routines never return an error
_, _ = file.hash.Write(p[:n])
}
return
}
// Close the object and update the md5sum
func (file *localOpenFile) Close() (err error) {
err = file.in.Close()
if err == nil {
file.o.md5sum = hex.EncodeToString(file.hash.Sum(nil))
} else {
file.o.md5sum = ""
}
return err
}
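// Note that this records the hash of however many bytes were read -
// it is only the md5sum of the whole file if the caller read all the
// way to EOF before closing.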
// Open an object for read
func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
in, err = os.Open(o.path)
if err != nil {
return
}
// Update the md5sum as we go along
in = &localOpenFile{
o: o,
in: in,
hash: md5.New(),
}
return
}
@@ -335,6 +439,10 @@ func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) erro
return err
}
// Calculate the md5sum of the object we are reading as we go along
hash := md5.New()
in = io.TeeReader(in, hash)
_, err = io.Copy(out, in)
outErr := out.Close()
if err != nil {
@@ -344,9 +452,14 @@ func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) erro
return outErr
}
// All successful so update the md5sum
o.md5sum = hex.EncodeToString(hash.Sum(nil))
// Set the mtime
o.SetModTime(modTime)
return nil
// Re-read info now that we have finished
return o.lstat()
}
// Stat a FsObject into info
@@ -363,4 +476,5 @@ func (o *FsObjectLocal) Remove() error {
// Check the interfaces are satisfied
var _ fs.Fs = &FsLocal{}
var _ fs.Purger = &FsLocal{}
var _ fs.Object = &FsObjectLocal{}

local/local_test.go Normal file (53 lines)

@@ -0,0 +1,53 @@
// Test Local filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
package local_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/local"
)
func init() {
fstests.NilObject = fs.Object((*local.FsObjectLocal)(nil))
fstests.RemoteName = ""
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

make_manual.py Executable file (77 lines)

@@ -0,0 +1,77 @@
#!/usr/bin/python
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
"""
import os
import re
from datetime import datetime
docpath = "docs/content"
outfile = "MANUAL.md"
# Order to add docs segments to make outfile
docs = [
"about.md",
"install.md",
"docs.md",
"drive.md",
"s3.md",
"swift.md",
"dropbox.md",
"googlecloudstorage.md",
"local.md",
"changelog.md",
"bugs.md",
"licence.md",
"authors.md",
"contact.md",
]
# Docs which aren't made into outfile
ignore_docs = [
"downloads.md",
]
def read_doc(doc):
"""Read file as a string"""
path = os.path.join(docpath, doc)
with open(path) as fd:
contents = fd.read()
parts = contents.split("---\n", 2)
if len(parts) != 3:
raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
contents = parts[2].strip()+"\n\n"
# Remove icons
contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
# Make [...](/links/) absolute
contents = re.sub(r'\((\/.*?\/)\)', r"(http://rclone.org\1)", contents)
return contents
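# For example (illustrative): read_doc turns the markdown link
# "[docs](/docs/)" into "[docs](http://rclone.org/docs/)" via the
# second re.sub above, so relative links survive in the single-page
# manual.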
def check_docs(docpath):
"""Check all the docs are in docpath"""
files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
files -= set(ignore_docs)
docs_set = set(docs)
if files == docs_set:
return
print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
raise ValueError("Missing files")
def main():
check_docs(docpath)
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s
""" % datetime.now().strftime("%b %d, %Y"))
for doc in docs:
out.write(read_doc(doc))
print "Written '%s'" % outfile
if __name__ == "__main__":
main()


@@ -1,3 +1,26 @@
Perhaps make Md5sum() and ModTime() optional. Define the zero values
"" and 0. Make it so we can support remotes which can't do those.
Fix the docs
* factor the README.md into the docs directory
* create it as part of make by assembling other parts
* write long docs about each flag
Change lsd command so it doesn't show -1
* Make sure all Fses show -1 for objects Zero for dates etc
* Make test?
Put the TestRemote names into the Fs description
Make test_all.sh use the TestRemote name automatically
Run errcheck and go vet in the make file
.. Also race detector?
.. go tool vet -shadow
Get rid of Storable?
Write developer manual
Todo
* FIXME: More -dry-run checks for object transfer
* Might be quicker to check md5sums first? for swift <-> swift certainly, and maybe for small files
@@ -5,16 +28,23 @@ Todo
* if object.PseudoDirectory {
* fmt.Printf("%9s %19s %s\n", "Directory", "-", fs.Remote())
* Make Account wrapper
* limit bandwidth for a pool of all individual connections
* do timeouts by setting a limit, seeing whether io has happened
and resetting it if it has
* make Account do progress meter
* Make logging controllable with flags (mostly done)
* -timeout: Make all timeouts be settable with command line parameters
* Windows paths? Do we need to translate / and \?
* Make a fs.Errorf and count errors and log them at a different level
* Add max object size to fs metadata - 5GB for swift, infinite for local, ? for s3
* tie into -max-size flag
* FIXME Make NewFs return err.IsAnObject so the LimitedFs
creation can be put in common code? Or try for as much as possible?
* FIXME Account all the transactions (ls etc) using a different
RoundTripper wrapper which wraps the transactions?
More rsync features
* include
* exclude
* max size
* -c, --checksum skip based on checksum, not mod-time & size
Ideas for flags
* --retries N flag which would make rclone retry a sync until successful or it tried N times.
Ideas
* could do encryption - put IV into metadata?
@@ -23,42 +53,10 @@ Ideas
* support
* sftp
* scp
* Google cloud storage: https://developers.google.com/storage/
* rsync over ssh
* dropbox: https://github.com/nickoneill/go-dropbox (no MD5s)
* control times sync (which is slow) with -a --archive flag?
Need to make directory objects otherwise can't upload an empty directory
* Or could upload empty directories only?
* Can't purge a local filesystem because it leaves the directories behind
Copying a single file? Or maybe with a glob pattern? Could do with LimitedFs
s3
* Can maybe set last modified?
* https://forums.aws.amazon.com/message.jspa?messageID=214062
* Otherwise can set metadata
* Returns etag and last modified in bucket list
* control times sync (which is slow with some remotes) with -a --archive flag?
* Copy a glob pattern - could do with LimitedFs
Bugs
* Non verbose - not sure number transferred got counted up? CHECK
* When doing copy it recurses the whole of the destination FS which isn't necessary
Making a release
* go build ./...
* cd rclonetest
* go build
* ./rclonetest memstore:
* ./rclonetest s3:
* ./rclonetest drive2:
* ./rclonetest /tmp/z
* cd ..
* make tag
* edit README.md Changelog
* git commit version.go rclonetest/version.go README.md docs/content/downloads.md
* make retag
* . ~/bin/go-cross
* make cross
* make upload
* make upload_website
* git push --tags

rclone.go (100 changed lines)

@@ -17,6 +17,8 @@ import (
"github.com/ncw/rclone/fs"
// Active file systems
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
@@ -26,8 +28,9 @@ import (
var (
// Flags
cpuprofile = pflag.StringP("cpuprofile", "", "", "Write cpu profile to file")
statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats")
statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats (0 to disable)")
version = pflag.BoolP("version", "V", false, "Print the version number")
logFile = pflag.StringP("log-file", "", "", "Log everything to this file")
)
type Command struct {
@@ -56,10 +59,10 @@ func (cmd *Command) checkArgs(args []string) {
var Commands = []Command{
{
Name: "copy",
ArgsHelp: "source://path dest://path",
ArgsHelp: "source:path dest:path",
Help: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Sync(fdst, fsrc, false)
@@ -72,13 +75,13 @@ var Commands = []Command{
},
{
Name: "sync",
ArgsHelp: "source://path dest://path",
ArgsHelp: "source:path dest:path",
Help: `
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the --dry-run flag.`,
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the --dry-run flag.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Sync(fdst, fsrc, true)
if err != nil {
@@ -90,11 +93,11 @@ var Commands = []Command{
},
{
Name: "ls",
ArgsHelp: "[remote://path]",
ArgsHelp: "[remote:path]",
Help: `
List all the objects in the the path.`,
List all the objects in the path with size and path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.List(fdst)
err := fs.List(fdst, os.Stdout)
if err != nil {
log.Fatalf("Failed to list: %v", err)
}
@@ -104,11 +107,11 @@ var Commands = []Command{
},
{
Name: "lsd",
ArgsHelp: "[remote://path]",
ArgsHelp: "[remote:path]",
Help: `
List all directories/containers/buckets in the path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.ListDir(fdst)
err := fs.ListDir(fdst, os.Stdout)
if err != nil {
log.Fatalf("Failed to listdir: %v", err)
}
@@ -116,9 +119,39 @@ var Commands = []Command{
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "lsl",
ArgsHelp: "[remote:path]",
Help: `
List all the objects in the path with modification time,
size and path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.ListLong(fdst, os.Stdout)
if err != nil {
log.Fatalf("Failed to list long: %v", err)
}
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "md5sum",
ArgsHelp: "[remote:path]",
Help: `
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Md5sum(fdst, os.Stdout)
if err != nil {
log.Fatalf("Failed to list: %v", err)
}
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "mkdir",
ArgsHelp: "remote://path",
ArgsHelp: "remote:path",
Help: `
Make the path if it doesn't already exist`,
Run: func(fdst, fsrc fs.Fs) {
@@ -132,7 +165,7 @@ var Commands = []Command{
},
{
Name: "rmdir",
ArgsHelp: "remote://path",
ArgsHelp: "remote:path",
Help: `
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.`,
@@ -147,7 +180,7 @@ var Commands = []Command{
},
{
Name: "purge",
ArgsHelp: "remote://path",
ArgsHelp: "remote:path",
Help: `
Remove the path and all of its contents.`,
Run: func(fdst, fsrc fs.Fs) {
@@ -161,7 +194,7 @@ var Commands = []Command{
},
{
Name: "check",
ArgsHelp: "source://path dest://path",
ArgsHelp: "source:path dest:path",
Help: `
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
@@ -200,7 +233,7 @@ Syntax: [options] subcommand <parameters> <parameters...>
Subcommands:
`, Version)
`, fs.Version)
for i := range Commands {
cmd := &Commands[i]
fmt.Fprintf(os.Stderr, " %s %s\n", cmd.Name, cmd.ArgsHelp)
@@ -210,7 +243,8 @@ Subcommands:
fmt.Fprintf(os.Stderr, "Options:\n")
pflag.PrintDefaults()
fmt.Fprintf(os.Stderr, `
It is only necessary to use a unique prefix of the subcommand, eg 'up' for 'upload'.
It is only necessary to use a unique prefix of the subcommand, eg 'up'
for 'upload'.
`)
}
@@ -235,7 +269,11 @@ func ParseFlags() {
fs.Stats.Error()
log.Fatal(err)
}
pprof.StartCPUProfile(f)
err = pprof.StartCPUProfile(f)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
defer pprof.StopCPUProfile()
}
}
@@ -289,6 +327,9 @@ func NewFs(remote string) fs.Fs {
// Print the stats every statsInterval
func StartStats() {
if *statsInterval <= 0 {
return
}
go func() {
ch := time.Tick(*statsInterval)
for {
@@ -301,11 +342,22 @@ func StartStats() {
func main() {
ParseFlags()
if *version {
fmt.Printf("rclone %s\n", Version)
fmt.Printf("rclone %s\n", fs.Version)
os.Exit(0)
}
command, args := ParseCommand()
// Log file output
if *logFile != "" {
f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
if err != nil {
log.Fatalf("Failed to open log file: %v", err)
}
_, _ = f.Seek(0, os.SEEK_END) // ignore error
log.SetOutput(f)
redirectStderr(f)
}
// Make source and destination fs
var fdst, fsrc fs.Fs
if len(args) >= 1 {
@@ -326,10 +378,10 @@ func main() {
if command.Run != nil {
command.Run(fdst, fsrc)
if !command.NoStats {
fmt.Println(fs.Stats)
fmt.Fprintln(os.Stderr, fs.Stats)
}
if fs.Config.Verbose {
log.Printf("*** Go routines at exit %d\n", runtime.NumGoroutine())
fs.Debug(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
}
if fs.Stats.Errored() {
os.Exit(1)


@@ -1,365 +0,0 @@
// Test rclone by doing real transactions to a storage provider to and
// from the local disk
package main
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ogier/pflag"
// Active file systems
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
)
// Globals
var (
localName, remoteName string
version = pflag.BoolP("version", "V", false, "Print the version number")
)
// Represents an item for checking
type Item struct {
Path string
Md5sum string
ModTime time.Time
Size int64
}
// Represents all items for checking
type Items struct {
byName map[string]*Item
items []Item
}
// Make an Items
func NewItems(items []Item) *Items {
is := &Items{
byName: make(map[string]*Item),
items: items,
}
// Fill up byName
for i := range items {
is.byName[items[i].Path] = &items[i]
}
return is
}
// Check off an item
func (is *Items) Find(obj fs.Object) {
i, ok := is.byName[obj.Remote()]
if !ok {
log.Fatalf("Unexpected file %q", obj.Remote())
}
delete(is.byName, obj.Remote())
// Check attributes
Md5sum, err := obj.Md5sum()
if err != nil {
log.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
}
if i.Md5sum != Md5sum {
log.Fatalf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
}
if i.Size != obj.Size() {
log.Fatalf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
}
// check the mod time to the given precision
modTime := obj.ModTime()
dt := modTime.Sub(i.ModTime)
if dt >= fs.Config.ModifyWindow || dt <= -fs.Config.ModifyWindow {
log.Fatalf("%s: Modification time difference too big |%s| > %s (%s vs %s)", obj.Remote(), dt, fs.Config.ModifyWindow, modTime, i.ModTime)
}
}
// Check all done
func (is *Items) Done() {
if len(is.byName) != 0 {
for name := range is.byName {
log.Printf("Not found %q", name)
}
log.Fatalf("%d objects not found", len(is.byName))
}
}
// Checks the fs to see if it has the expected contents
func CheckListing(f fs.Fs, items []Item) {
is := NewItems(items)
for obj := range f.List() {
is.Find(obj)
}
is.Done()
}
// Parse a time string or explode
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// Write a file
func WriteFile(filePath, content string, t time.Time) {
// FIXME make directories?
filePath = path.Join(localName, filePath)
err := ioutil.WriteFile(filePath, []byte(content), 0600)
if err != nil {
log.Fatalf("Failed to write file %q: %v", filePath, err)
}
err = os.Chtimes(filePath, t, t)
if err != nil {
log.Fatalf("Failed to chtimes file %q: %v", filePath, err)
}
}
// Create a random string
func RandomString(n int) string {
source := "abcdefghijklmnopqrstuvwxyz0123456789"
out := make([]byte, n)
for i := range out {
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
func TestMkdir(flocal, fremote fs.Fs) {
err := fs.Mkdir(fremote)
if err != nil {
log.Fatalf("Mkdir failed: %v", err)
}
items := []Item{}
CheckListing(flocal, items)
CheckListing(fremote, items)
}
var t1 = Time("2001-02-03T04:05:06.999999999Z")
var t2 = Time("2011-12-25T12:59:59.123456789Z")
func TestCopy(flocal, fremote fs.Fs) {
WriteFile("empty space", "", t1)
// Check dry run is working
log.Printf("Copy with --dry-run")
fs.Config.DryRun = true
err := fs.Sync(fremote, flocal, false)
fs.Config.DryRun = false
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
items := []Item{
{Path: "empty space", Size: 0, ModTime: t1, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
CheckListing(flocal, items)
CheckListing(fremote, []Item{})
// Now without dry run
log.Printf("Copy")
err = fs.Sync(fremote, flocal, false)
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
CheckListing(flocal, items)
CheckListing(fremote, items)
}
func TestSync(flocal, fremote fs.Fs) {
log.Printf("Sync after changing file modtime only")
err := os.Chtimes(localName+"/empty space", t2, t2)
if err != nil {
log.Fatalf("Chtimes failed: %v", err)
}
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items := []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after adding a file")
WriteFile("potato", "------------------------------------------------------------", t1)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after changing a file's size only")
WriteFile("potato", "smaller but same date", t1)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t1, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after removing a file and adding a file --dry-run")
WriteFile("potato2", "------------------------------------------------------------", t1)
err = os.Remove(localName + "/potato")
if err != nil {
log.Fatalf("Remove failed: %v", err)
}
fs.Config.DryRun = true
err = fs.Sync(fremote, flocal, true)
fs.Config.DryRun = false
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
before := []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t1, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
CheckListing(flocal, items)
CheckListing(fremote, before)
log.Printf("Sync after removing a file and adding a file")
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
CheckListing(flocal, items)
CheckListing(fremote, items)
}
func TestLs(flocal, fremote fs.Fs) {
// Underlying List has been tested above, so we just make sure it runs
err := fs.List(fremote)
if err != nil {
log.Fatalf("List failed: %v", err)
}
}
func TestLsd(flocal, fremote fs.Fs) {
}
func TestCheck(flocal, fremote fs.Fs) {
}
func TestPurge(flocal, fremote fs.Fs) {
err := fs.Purge(fremote)
if err != nil {
log.Fatalf("Purge failed: %v", err)
}
}
func TestRmdir(flocal, fremote fs.Fs) {
err := fs.Rmdir(fremote)
if err != nil {
log.Fatalf("Rmdir failed: %v", err)
}
}
func syntaxError() {
fmt.Fprintf(os.Stderr, `Test rclone with a remote to find bugs in either - %s.
Syntax: [options] remote:
Need a remote: as argument. This will create a random container or
directory under it and perform tests on it, deleting it at the end.
Options:
`, Version)
pflag.PrintDefaults()
}
// Clean the temporary directory
func cleanTempDir() {
log.Printf("Cleaning temporary directory: %q", localName)
err := os.RemoveAll(localName)
if err != nil {
log.Printf("Failed to remove %q: %v", localName, err)
}
}
func main() {
pflag.Usage = syntaxError
pflag.Parse()
if *version {
fmt.Printf("rclonetest %s\n", Version)
os.Exit(0)
}
fs.LoadConfig()
rand.Seed(time.Now().UnixNano())
args := pflag.Args()
if len(args) != 1 {
syntaxError()
os.Exit(1)
}
remoteName = args[0]
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
remoteName += RandomString(32)
log.Printf("Testing with remote %q", remoteName)
var err error
localName, err = ioutil.TempDir("", "rclone")
if err != nil {
log.Fatalf("Failed to create temp dir: %v", err)
}
log.Printf("Testing with local %q", localName)
fremote, err := fs.NewFs(remoteName)
if err != nil {
log.Fatalf("Failed to make %q: %v", remoteName, err)
}
flocal, err := fs.NewFs(localName)
if err != nil {
log.Fatalf("Failed to make %q: %v", remoteName, err)
}
fs.CalculateModifyWindow(fremote, flocal)
TestMkdir(flocal, fremote)
TestCopy(flocal, fremote)
TestSync(flocal, fremote)
TestLs(flocal, fremote)
TestLsd(flocal, fremote)
TestCheck(flocal, fremote)
TestPurge(flocal, fremote)
//TestRmdir(flocal, fremote)
cleanTempDir()
log.Printf("Tests OK")
}


@@ -1,3 +0,0 @@
package main
const Version = "v0.99"

redirect_stderr.go Normal file (15 lines)

@@ -0,0 +1,15 @@
// Log the panic to the log file - for OSes which can't do this
//+build !windows,!unix
package main
import (
"log"
"os"
)
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
log.Printf("Can't redirect stderr to file")
}

redirect_stderr_unix.go Normal file (19 lines)

@@ -0,0 +1,19 @@
// Log the panic under unix to the log file
//+build unix
package main
import (
"log"
"os"
"syscall"
)
// redirectStderr to the file passed in
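//
// Dup2 points fd 2 at the file, so anything written to stderr,
// including runtime panic tracebacks, ends up in the log file.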
func redirectStderr(f *os.File) {
err := syscall.Dup2(int(f.Fd()), int(os.Stderr.Fd()))
if err != nil {
log.Fatalf("Failed to redirect stderr to file: %v", err)
}
}


@@ -0,0 +1,39 @@
// Log the panic under windows to the log file
//
// Code from minix, via
//
// http://play.golang.org/p/kLtct7lSUg
//+build windows
package main
import (
"log"
"os"
"syscall"
)
var (
kernel32 = syscall.MustLoadDLL("kernel32.dll")
procSetStdHandle = kernel32.MustFindProc("SetStdHandle")
)
func setStdHandle(stdhandle int32, handle syscall.Handle) error {
r0, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
if r0 == 0 {
if e1 != 0 {
return error(e1)
}
return syscall.EINVAL
}
return nil
}
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
err := setStdHandle(syscall.STD_ERROR_HANDLE, syscall.Handle(f.Fd()))
if err != nil {
log.Fatalf("Failed to redirect stderr to file: %v", err)
}
}

s3/s3.go (109 changed lines)

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"mime"
"net/http"
"path"
"regexp"
@@ -101,7 +100,8 @@ func init() {
// Constants
const (
metaMtime = "X-Amz-Meta-Mtime" // the meta key to store mtime in
metaMtime = "X-Amz-Meta-Mtime" // the meta key to store mtime in
listChunkSize = 1024 // number of items to read at once
)
// FsS3 represents a remote s3 server
@@ -184,6 +184,7 @@ func s3Connection(name string) (*s3.S3, error) {
}
c := s3.New(auth, region)
c.Client = fs.Config.Client()
return c, nil
}
@@ -227,7 +228,7 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) NewFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
o := &FsObjectS3{
s3: f,
remote: remote,
@@ -256,7 +257,7 @@ func (f *FsS3) NewFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
//
// May return nil if an error occurred
func (f *FsS3) NewFsObject(remote string) fs.Object {
return f.NewFsObjectWithInfo(remote, nil)
return f.newFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
@@ -267,33 +268,46 @@ func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
if directories {
delimiter = "/"
}
// FIXME need to implement ALL loop
objects, err := f.b.List(f.root, delimiter, "", 10000)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
} else {
rootLength := len(f.root)
if directories {
for _, remote := range objects.CommonPrefixes {
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote := remote[rootLength:]
fn(remote, &s3.Key{Key: remote})
}
marker := ""
for {
objects, err := f.b.List(f.root, delimiter, marker, listChunkSize)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
} else {
for i := range objects.Contents {
object := &objects.Contents[i]
if !strings.HasPrefix(object.Key, f.root) {
fs.Log(f, "Odd name received %q", object.Key)
continue
rootLength := len(f.root)
if directories {
for _, remote := range objects.CommonPrefixes {
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote := remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
fn(remote, &s3.Key{Key: remote})
}
} else {
for i := range objects.Contents {
object := &objects.Contents[i]
if !strings.HasPrefix(object.Key, f.root) {
fs.Log(f, "Odd name received %q", object.Key)
continue
}
remote := object.Key[rootLength:]
fn(remote, object)
}
remote := object.Key[rootLength:]
fn(remote, object)
}
}
if !objects.IsTruncated {
break
}
// Use NextMarker if set, otherwise use last Key
marker = objects.NextMarker
if marker == "" {
marker = objects.Contents[len(objects.Contents)-1].Key
}
}
}
@@ -309,7 +323,7 @@ func (f *FsS3) List() fs.ObjectsChan {
go func() {
defer close(out)
f.list(false, func(remote string, object *s3.Key) {
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
@@ -406,9 +420,17 @@ func (o *FsObjectS3) Remote() string {
return o.remote
}
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectS3) Md5sum() (string, error) {
return strings.Trim(strings.ToLower(o.etag), `"`), nil
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
return "", nil
}
return etag, nil
}
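// Note: the etag of a multipart upload looks like
// "9b2cf535f27731c974343645a3985328-2" (an md5 of the part md5s plus
// a part count), which fails the match above, so such objects report
// an empty md5sum rather than a wrong one.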
// Size returns the size of an object in bytes
@@ -418,13 +440,28 @@ func (o *FsObjectS3) Size() int64 {
// readMetaData gets the metadata if it hasn't already been fetched
//
// if we get a 404 error then we retry a few times for eventual
// consistency reasons
//
// it also sets the info
func (o *FsObjectS3) readMetaData() (err error) {
if o.meta != nil {
return nil
}
var headers s3.Headers
headers, err := o.s3.b.Head(o.s3.root+o.remote, nil)
// Try reading the metadata a few times (with exponential
// backoff) to get around eventual consistency on 404 error
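// The waits are 5ms, 10ms, 20ms, ... 2.56s - about 5.1s in total
// across the 10 tries.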
for tries := uint(0); tries < 10; tries++ {
headers, err = o.s3.b.Head(o.s3.root+o.remote, nil)
if s3Err, ok := err.(*s3.Error); ok {
if s3Err.StatusCode == http.StatusNotFound {
time.Sleep(5 * time.Millisecond << tries)
continue
}
}
break
}
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
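Each retry doubles the sleep via 5 * time.Millisecond << tries, giving 5ms, 10ms, 20ms and so on up to 2.56s, roughly 5.1s in total across the ten attempts. A standalone snippet (not from the rclone source) that prints the schedule:

package main

import (
	"fmt"
	"time"
)

func main() {
	total := time.Duration(0)
	for tries := uint(0); tries < 10; tries++ {
		d := 5 * time.Millisecond << tries // 5ms, 10ms, 20ms, ...
		total += d
		fmt.Printf("try %d: sleep %v (cumulative %v)\n", tries, d, total)
	}
}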
@@ -507,13 +544,13 @@ func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
metaMtime: swift.TimeToFloatString(modTime),
}
-	// Guess the content type
-	contentType := mime.TypeByExtension(path.Ext(o.remote))
-	if contentType == "" {
-		contentType = "application/octet-stream"
+	_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, fs.MimeType(o), o.s3.perm, headers)
+	if err != nil {
+		return err
	}
-	_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, contentType, o.s3.perm, headers)
+	// Read the metadata from the newly created object
+	o.meta = nil // wipe old metadata
+	err = o.readMetaData()
return err
}
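Update now invalidates the cached metadata and re-reads it once the upload completes, so later Size or ModTime calls reflect what the server actually stored; the swift Update further down gains the same logic. A toy illustration of the wipe-and-reread pattern (the object type and canned metadata are invented for the example):

package main

import "fmt"

type object struct {
	meta map[string]string // nil until fetched
}

// readMetaData populates the cache if it is empty, standing in for a
// HEAD request against the remote object.
func (o *object) readMetaData() map[string]string {
	if o.meta == nil {
		o.meta = map[string]string{"mtime": "1433848800"}
	}
	return o.meta
}

// update mimics the hunk above: after an upload, drop the cached
// metadata and re-read it from the newly created object.
func (o *object) update() {
	// ... upload would happen here ...
	o.meta = nil // wipe old metadata
	o.readMetaData()
}

func main() {
	o := &object{}
	o.update()
	fmt.Println(o.readMetaData()["mtime"])
}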

s3/s3_test.go (new file, 53 lines)

@@ -0,0 +1,53 @@
// Test S3 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
package s3_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/s3"
)
func init() {
fstests.NilObject = fs.Object((*s3.FsObjectS3)(nil))
fstests.RemoteName = "TestS3:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

swift/swift.go

@@ -44,6 +44,12 @@ func init() {
Help: "Memset Memstore UK v2",
Value: "https://auth.storage.memset.com/v2.0",
}},
+	}, {
+		Name: "tenant",
+		Help: "Tenant name - optional",
+	}, {
+		Name: "region",
+		Help: "Region name - optional",
},
// snet = flag.Bool("swift-snet", false, "Use internal service network") // FIXME not implemented
},
@@ -107,9 +113,15 @@ func swiftConnection(name string) (*swift.Connection, error) {
return nil, errors.New("auth not found")
}
c := &swift.Connection{
-		UserName: userName,
-		ApiKey:   apiKey,
-		AuthUrl:  authUrl,
+		UserName:       userName,
+		ApiKey:         apiKey,
+		AuthUrl:        authUrl,
+		UserAgent:      fs.UserAgent,
+		Tenant:         fs.ConfigFile.MustValue(name, "tenant"),
+		Region:         fs.ConfigFile.MustValue(name, "region"),
+		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
+		Timeout:        10 * fs.Config.Timeout, // Use the timeouts in the transport
+		Transport:      fs.Config.Transport(),
}
err := c.Authenticate()
if err != nil {
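The 10x multiplier leaves the swift library's own limits as a backstop: the effective timeouts are meant to fire inside the shared transport returned by fs.Config.Transport(). A rough sketch of how such a transport can enforce them (assumed construction; rclone's actual helper may differ):

package main

import (
	"net"
	"net/http"
	"time"
)

// newTransport returns an http.Transport that enforces a dial timeout
// itself, so any longer library-level timeouts only act as a backstop.
func newTransport(connectTimeout, timeout time.Duration) *http.Transport {
	return &http.Transport{
		Dial: func(network, addr string) (net.Conn, error) {
			c, err := net.DialTimeout(network, addr, connectTimeout)
			if err != nil {
				return nil, err
			}
			// One overall deadline bounds all reads and writes on
			// the connection.
			if err := c.SetDeadline(time.Now().Add(timeout)); err != nil {
				c.Close()
				return nil, err
			}
			return c, nil
		},
	}
}

func main() {
	client := &http.Client{Transport: newTransport(10*time.Second, 5*time.Minute)}
	_ = client
}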
@@ -156,7 +168,7 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
-func (f *FsSwift) NewFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
+func (f *FsSwift) newFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
fs := &FsObjectSwift{
swift: f,
remote: remote,
@@ -178,7 +190,7 @@ func (f *FsSwift) NewFsObjectWithInfo(remote string, info *swift.Object) fs.Obje
//
// May return nil if an error occurred
func (f *FsSwift) NewFsObject(remote string) fs.Object {
-	return f.NewFsObjectWithInfo(remote, nil)
+	return f.newFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
@@ -200,8 +212,11 @@ func (f *FsSwift) list(directories bool, fn func(string, *swift.Object)) {
for i := range objects {
object := &objects[i]
// FIXME if there are no directories, swift gives back the files for some reason!
-			if directories && !strings.HasSuffix(object.Name, "/") {
-				continue
+			if directories {
+				if !strings.HasSuffix(object.Name, "/") {
+					continue
+				}
+				object.Name = object.Name[:len(object.Name)-1]
}
if !strings.HasPrefix(object.Name, f.root) {
fs.Log(f, "Odd name received %q", object.Name)
@@ -232,7 +247,7 @@ func (f *FsSwift) List() fs.ObjectsChan {
go func() {
defer close(out)
f.list(false, func(remote string, object *swift.Object) {
-			if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
+			if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
@@ -408,6 +423,12 @@ func (o *FsObjectSwift) Update(in io.Reader, modTime time.Time, size int64) erro
m := swift.Metadata{}
m.SetModTime(modTime)
_, err := o.swift.c.ObjectPut(o.swift.container, o.swift.root+o.remote, in, true, "", "", m.ObjectHeaders())
+	if err != nil {
+		return err
+	}
+	// Read the metadata from the newly created object
+	o.meta = nil // wipe old metadata
+	err = o.readMetaData()
return err
}

swift/swift_test.go (new file, 53 lines)

@@ -0,0 +1,53 @@
// Test Swift filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
package swift_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/swift"
)
func init() {
fstests.NilObject = fs.Object((*swift.FsObjectSwift)(nil))
fstests.RemoteName = "TestSwift:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
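The two test files above differ only in the package, the backend name and the concrete nil-object type, which is why they are generated rather than maintained by hand. A toy generator in that spirit (hypothetical; the real gen_tests.go will differ):

package main

import (
	"os"
	"text/template"
)

// tmpl captures what varies between the generated backend test files.
var tmpl = template.Must(template.New("tests").Parse(
	`// Test {{.Name}} filesystem interface
//
// Automatically generated - DO NOT EDIT
package {{.Package}}_test

import (
	"testing"

	"github.com/ncw/rclone/fstest/fstests"
)

func TestInit(t *testing.T) { fstests.TestInit(t) }
`))

func main() {
	data := struct{ Name, Package string }{Name: "S3", Package: "s3"}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}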


@@ -1,3 +0,0 @@
-package main
-
-const Version = "v0.99"