mirror of https://github.com/rclone/rclone.git synced 2025-12-15 15:53:41 +00:00

Compare commits


279 Commits
v0.98 ... v1.24

Author SHA1 Message Date
Nick Craig-Wood
4276abc58b Version v1.24 2015-11-07 16:23:12 +00:00
Nick Craig-Wood
a795d93bc3 swift, s3, googlecloudstorage: Don't delete the container/bucket if fs wasn't at root - fixes #172 2015-11-07 15:32:40 +00:00
Nick Craig-Wood
5df04cb763 swift: ignore directory marker objects where appropriate - fixes #190
* When creating a LimitedFs
  * When calling List() to list files
  * In the Storable() method
  * Add a Purge() method to delete the directory marker objects too

This is a partial fix for #172
2015-11-07 15:32:11 +00:00
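Recognising a directory marker is the crux of the fix above. A minimal sketch of one common convention (zero-length objects whose content type is `application/directory`); the exact rule rclone applies may differ:

```go
package main

import "fmt"

// isDirMarker reports whether a swift object looks like a directory
// marker: a zero-length object whose content type declares a directory.
// A sketch of one common convention, not necessarily rclone's exact rule.
func isDirMarker(contentType string, size int64) bool {
	return size == 0 && contentType == "application/directory"
}

func main() {
	fmt.Println(isDirMarker("application/directory", 0)) // true - skip in List()
	fmt.Println(isDirMarker("text/plain", 0))            // false - a real (empty) file
}
```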
Nick Craig-Wood
ef54167a4a Add goimports check to make check 2015-11-07 12:16:33 +00:00
Nick Craig-Wood
d42cb11b84 Fix tests to run all tests again and add onedrive 2015-11-07 11:21:15 +00:00
Nick Craig-Wood
b257de4aba Be more consistent with naming in remotes
* External objects are called Fs and Object
  * Object.fs always points to the Fs
2015-11-07 11:14:46 +00:00
Nick Craig-Wood
365b4babae Make filter test files pass errcheck 2015-11-07 10:27:47 +00:00
Nick Craig-Wood
6d48dffa2f Add -dump-headers and -dump-bodies flags for remote tests 2015-11-07 10:27:47 +00:00
Nick Craig-Wood
8f2999b6af onedrive: implement Copy 2015-11-07 10:27:47 +00:00
Nick Craig-Wood
be6115fbfa Fix nil pointer exception on test failure 2015-11-07 10:19:10 +00:00
Nick Craig-Wood
2fcb8f5db7 Add support for Microsoft One Drive - fixes #10
* Still to do
    * Copy
    * Move
    * MoveDir
2015-11-07 10:19:10 +00:00
Nick Craig-Wood
0ab3f020ab Fix Amazon icon in the docs 2015-11-06 15:26:58 +00:00
Nick Craig-Wood
64c23c2f5b Update font awesome to 4.4.0 2015-11-06 15:26:58 +00:00
Nick Craig-Wood
ff16e0f6df Factor common error handling into fs module 2015-11-06 15:26:58 +00:00
Nick Craig-Wood
1a82ba196b dircache: expose FoundRoot flag 2015-11-06 15:26:58 +00:00
Nick Craig-Wood
ed72c678f8 Protect accounting from being closed twice 2015-11-06 15:26:58 +00:00
Nick Craig-Wood
4ed8836a71 oauthutil: add RedirectPublicURL 2015-11-06 15:26:58 +00:00
Nick Craig-Wood
5529978fa7 dircache: make separate mutex for the cache 2015-11-06 15:26:58 +00:00
Nick Craig-Wood
66d84c9914 Document where to install root certificates - fixes #196 2015-11-05 18:09:56 +00:00
klauspost
b85ddc4e4f Extend CI tests to include formatting checks.
CI now tests 'go vet', 'go fmt' (via goimports) and golint.

Adds Travis experimental OSX support.
2015-11-03 13:50:29 +01:00
klauspost
e4a9e27a55 Add proxy information to FAQ.
Fixes #160.
2015-11-02 20:19:50 +00:00
Klaus Post
22645eea2e Add AppVeyor Windows CI to tests
AppVeyor is free, and functions pretty much like Travis, only on Windows.
2015-11-02 20:11:06 +00:00
klauspost
345c98ed62 Update to AWS SDK 0.10.0
Tested with S3 and Dreamhost

Here is a link to the release notes:

http://aws.amazon.com/releasenotes/5476699172355228
2015-11-02 19:52:11 +00:00
klauspost
b872ff0237 Add option to disable server certificate verification.
The option name mirrors the 'wget' option (also `--no-check-certificate`). The cURL equivalent is called `--insecure`, which is a bit unclear.

Put in the "developers" section in documentation with proper warnings.

Fixes #168
2015-10-29 16:42:25 +01:00
Nick Craig-Wood
1b95718460 Fix typos in filter docs and unit test assertions 2015-10-20 09:16:47 +01:00
klauspost
6a3580c556 Show status of the master branch, so it doesn't show the status of the last pushed branch. 2015-10-19 18:56:03 +01:00
klauspost
16c9fba5de Fix tests failing on Windows.
* ":" is kept when part of a drive.
  * Create tests.
  * Fix test framework.
2015-10-19 17:36:15 +01:00
Nick Craig-Wood
4e952af614 Allow spaces in remotes and check remote names for validity at creation time - fixes #171 2015-10-12 17:54:09 +01:00
Klaus Post
6344c3051c Add async readahead buffer
This adds an async read buffer of 4x4MB when copying files >10MB.

This fixes #164 and reduces the number of IO operations for copy/move.
2015-10-12 08:30:27 +01:00
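The readahead overlaps network reads with writes by handing filled buffers to the writer over a channel. A sketch of the idea, with a configurable buffer count and size standing in for the 4x4MB above; this is not rclone's actual implementation:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// asyncCopy copies src to dst using a background reader goroutine and a
// buffered channel of pre-filled chunks, so reads and writes overlap.
func asyncCopy(dst io.Writer, src io.Reader, bufs, size int) (int64, error) {
	type chunk struct {
		data []byte
		err  error
	}
	ch := make(chan chunk, bufs)
	go func() {
		defer close(ch)
		for {
			buf := make([]byte, size) // fresh buffer per chunk: no reuse races
			n, err := io.ReadFull(src, buf)
			if n > 0 {
				ch <- chunk{data: buf[:n]}
			}
			if err != nil {
				if err != io.EOF && err != io.ErrUnexpectedEOF {
					ch <- chunk{err: err}
				}
				return
			}
		}
	}()
	var written int64
	for c := range ch {
		if c.err != nil {
			return written, c.err
		}
		n, err := dst.Write(c.data)
		written += int64(n)
		if err != nil {
			return written, err
		}
	}
	return written, nil
}

func main() {
	var out bytes.Buffer
	n, err := asyncCopy(&out, strings.NewReader("hello readahead"), 4, 4)
	fmt.Println(n, err, out.String())
}
```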
klauspost
ab9f521cbd Allow '&' and disallow ':' in Windows filenames.
Fixes #161
2015-10-05 11:04:25 +02:00
Nick Craig-Wood
3a900e5bb7 Version v1.23 2015-10-03 16:24:07 +01:00
Nick Craig-Wood
b4d7741611 Improve output of --dump-headers 2015-10-03 16:04:51 +01:00
Nick Craig-Wood
95fd79faf9 swift: use Content-Length on uploads - fixes #125 2015-10-03 16:04:51 +01:00
Nick Craig-Wood
b79dc01016 swift: stop chunked operations logging "Failed to read info: Object Not Found" 2015-10-03 16:04:51 +01:00
Nick Craig-Wood
bf562d7373 Tweak wording on client/secret ids in acd, gcs and drive - fixes #155 2015-10-03 16:04:45 +01:00
Nick Craig-Wood
2e9f2ea3d3 oauthutil: fix headless config for drive and gcs - fixes #158 2015-10-03 16:04:21 +01:00
Nick Craig-Wood
177dbbc29a Implement rclone size for measuring remotes - fixes #153 2015-10-03 16:04:21 +01:00
Nick Craig-Wood
4712043e26 Fix race condition in pacer tests - fixes #156 2015-10-03 16:04:21 +01:00
Nick Craig-Wood
852acd5e4e oauthutil: tell the user they should try again if the webserver method failed 2015-10-03 16:04:21 +01:00
Nick Craig-Wood
9f1daabb2c s3: allow anonymous access to public repositories - fixes #154
When setting up the remote, leave both the access key and secret key
blank.
2015-10-03 16:04:21 +01:00
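In SDK terms, blank keys mean requests go out unsigned. A sketch of how that might look with a later aws-sdk-go v1 (the project used SDK 0.10.0 at this point, so the names here come from the newer API); the bucket name is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Anonymous credentials: requests are not signed, which is what
	// leaving both keys blank in the rclone config amounts to.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:      aws.String("us-east-1"),
		Credentials: credentials.AnonymousCredentials,
	}))
	svc := s3.New(sess)
	out, err := svc.ListObjects(&s3.ListObjectsInput{
		Bucket: aws.String("some-public-bucket"), // placeholder
	})
	if err != nil {
		fmt.Println("list failed:", err)
		return
	}
	fmt.Println("objects:", len(out.Contents))
}
```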
Nick Craig-Wood
938dd24cc9 Fix typo 2015-09-28 22:51:33 +01:00
Nick Craig-Wood
57aad81b68 Version v1.22 2015-09-28 19:38:20 +01:00
Nick Craig-Wood
a91bcaaeb0 Implement rsync like include and exclude - fixes #27
* Implement include/exclude
  * Implement rsync compatible file globbing
  * Implement command line filtering flags
    * --delete-excluded - Delete files on dest excluded from sync
    * --filter - Add a file-filtering rule
    * --filter-from - Read filtering patterns from a file
    * --exclude - Exclude files matching pattern
    * --exclude-from - Read exclude patterns from file
    * --include - Include files matching pattern
    * --include-from - Read include patterns from file
    * --files-from - Read list of source-file names
    * --min-size - Don't transfer any file smaller than this in k or suffix k|M|G
    * --max-size - Don't transfer any file larger than this in k or suffix k|M|G
  * Document
2015-09-28 19:18:21 +01:00
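The glob syntax distinguishes `*`, which stays within one path segment, from `**`, which crosses `/`. A simplified sketch of the translation to a regexp, covering only `*`, `**` and `?` rather than the full rsync grammar:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// globToRegexp converts a cut-down rsync-style glob into a regexp.
// No character classes or {a,b} alternation in this sketch.
func globToRegexp(glob string) (*regexp.Regexp, error) {
	var b strings.Builder
	b.WriteString("^")
	for i := 0; i < len(glob); i++ {
		switch glob[i] {
		case '*':
			if i+1 < len(glob) && glob[i+1] == '*' {
				b.WriteString(".*") // ** crosses /
				i++
			} else {
				b.WriteString("[^/]*") // * stays in one segment
			}
		case '?':
			b.WriteString("[^/]") // ? is any single non-separator
		default:
			b.WriteString(regexp.QuoteMeta(string(glob[i])))
		}
	}
	b.WriteString("$")
	return regexp.Compile(b.String())
}

func main() {
	re, _ := globToRegexp("*.jpg")
	fmt.Println(re.MatchString("a.jpg"), re.MatchString("dir/a.jpg")) // true false
	re, _ = globToRegexp("**.jpg")
	fmt.Println(re.MatchString("dir/a.jpg")) // true
}
```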
Nick Craig-Wood
d04c21b198 Add Sergey Tolmachev to contributors 2015-09-28 19:12:35 +01:00
Nick Craig-Wood
4a0a42c2f1 swift: large file upload fixes
* Read metadata in file listing for 0 length files to fix syncs
  * Ignore non-existent files in isManifestFile to fix errors on copy
  * remove nsToSwiftFloatString - experiments with the swift program
    indicate that it puts a variable number of digits after the
    decimal point, so might as well use the one in the swift library.
  * Make sure segments get deleted properly when moving from segmented
    to non-segmented files and vice versa
  * Use internal list routine to detect errors on listing
  * Remove the _segments container if possible
  * Remove manifest first when deleting
2015-09-26 17:58:04 +01:00
Sergey Tolmachev
cc7b9af50e swift: support files > 5GB - fixes #46
* Write segments to ..._segments container
  * Choice of container and segment names compatible with swift tool
    * See http://docs.openstack.org/developer/swift/overview_large_objects.html
  * Controlled by command line flag --swift-chunk-size
  * Segments removed on delete
2015-09-26 13:03:58 +01:00
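Segments live in a parallel `<container>_segments` container so the manifest object in the original container stays tiny. A sketch of the naming arithmetic; the per-segment name format here is an assumption, not the exact strings rclone writes:

```go
package main

import "fmt"

// segmentPath builds names for a large-object upload split into chunks:
// segments go in "<container>_segments" under a per-upload prefix.
func segmentPath(container, object, uploadID string, n int) (segContainer, segName string) {
	segContainer = container + "_segments"
	segName = fmt.Sprintf("%s/%s/%08d", object, uploadID, n)
	return
}

func main() {
	c, o := segmentPath("backup", "videos/big.mkv", "1446913991.123", 3)
	fmt.Println(c, o) // backup_segments videos/big.mkv/1446913991.123/00000003
}
```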
Nick Craig-Wood
68fef49c55 swift: fetch headers as the only source of metadata 2015-09-26 13:03:58 +01:00
Nick Craig-Wood
5d4b149884 Version v1.21 2015-09-22 21:05:11 +01:00
Nick Craig-Wood
5f20ae707d Make lsl output times in localtime and fix tests - fixes #141 2015-09-22 19:04:12 +01:00
Nick Craig-Wood
e9c915e6fe Fix golint warnings 2015-09-22 18:47:16 +01:00
Nick Craig-Wood
2ed158aba3 Fixes from go vet and errcheck 2015-09-22 07:31:12 +01:00
Nick Craig-Wood
05050d53ad Implement compliant pacing scheme for Amazon Cloud Drive
* Implement switchable pacing algorithm
  * Add tests for pacer
2015-09-21 21:49:54 +01:00
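The pacing scheme reduces to sleeping before each call and adjusting the interval by outcome: failures back off exponentially, successes decay back towards the minimum. A minimal sketch with illustrative constants, not rclone's tuned values:

```go
package main

import (
	"fmt"
	"time"
)

// pacer sleeps for the current interval before every API call; failures
// double the interval (up to max), successes halve it (down to min).
type pacer struct {
	sleep, min, max time.Duration
}

func (p *pacer) call(f func() (retry bool, err error)) error {
	for {
		time.Sleep(p.sleep)
		retry, err := f()
		if retry {
			p.sleep *= 2 // back off
			if p.sleep > p.max {
				p.sleep = p.max
			}
			continue
		}
		p.sleep /= 2 // recover towards the minimum
		if p.sleep < p.min {
			p.sleep = p.min
		}
		return err
	}
}

func main() {
	p := &pacer{sleep: 20 * time.Millisecond, min: 20 * time.Millisecond, max: time.Second}
	tries := 0
	err := p.call(func() (bool, error) {
		tries++
		return tries < 3, nil // fail twice, then succeed
	})
	fmt.Println(tries, err, p.sleep) // 3 <nil> 40ms
}
```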
Nick Craig-Wood
e391311512 gofmt 2015-09-17 18:42:39 +01:00
klauspost
3234c28f7c Minor adjustments to the UNC path conversion function. 2015-09-17 18:42:10 +01:00
klauspost
6fbd9cf24b Make Google Drive Directory reads concurrent for increased speed.
If you use the -v flag then this will show the progress of directory
listing.  Fixes #143.
2015-09-17 18:42:10 +01:00
klauspost
bc5b63ffef Move ACD Connection-limiter into Pacer. 2015-09-17 18:42:10 +01:00
klauspost
788ef76f1c Show more of the filename and align output.
Print more of the file name, and make the output aligned, so it is nicer on frequent updates.
2015-09-17 15:53:45 +02:00
Nick Craig-Wood
0872ec3204 Allow user to override credentials again in drive, gcs and acd - fixes #139 2015-09-16 20:08:40 +01:00
klauspost
0a5870208e Display individual transfer progress
Improve progress printing by displaying individual file progress, as well
as a moving average speed with ETA. Example output:

2015/09/15 16:38:21
Transferred:    183599104 Bytes (4646.49 kByte/s)
Errors:                 0
Checks:                 1
Transferred:            0
Elapsed time:       38.5s
Transferring:
 * 01_06_14.mp3: 33% done. avg: 1280.5, cur: 1288.8 kByte/s. ETA: 1m12s
 * 01_12_15.mp3: 33% done. avg: 1002.2, cur:  943.4 kByte/s. ETA: 1m17s
 * 01_13_14.mp3: 48% done. avg: 1456.8, cur: 1425.2 kByte/s. ETA: 39s
 * 01_19_15.mp3: 28% done. avg: 1226.9, cur: 1114.4 kByte/s. ETA: 1m37s
2015-09-16 19:42:46 +01:00
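The ETA column falls out of simple arithmetic on the moving-average rate. A sketch of just that calculation (rclone also tracks the short-window "cur" rate shown above); the file size in main is hypothetical:

```go
package main

import (
	"fmt"
	"time"
)

// eta estimates time remaining from bytes done, total bytes and an
// average transfer rate in bytes per second.
func eta(done, total int64, avgBytesPerSec float64) time.Duration {
	if avgBytesPerSec <= 0 || done >= total {
		return 0
	}
	secs := float64(total-done) / avgBytesPerSec
	return time.Duration(secs * float64(time.Second)).Round(time.Second)
}

func main() {
	// Hypothetical 16 MiB file, 33% done, at 1280.5 kByte/s average.
	done, total := int64(16777216/3), int64(16777216)
	fmt.Println(eta(done, total, 1280.5*1024)) // ~9s
}
```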
klauspost
3219334c3e Add "-race" test to travis. 2015-09-16 12:26:28 +02:00
klauspost
79fd662676 Protect concurrent read/writes to pacer.
Protects all variables in the pacer from concurrent modifications. It is now safe to modify pacer settings while it is running.

I decided to not go for an RWMutex, since all accesses are very short, so the overhead of an RWMutex isn't worth it.

Fixes #138.
2015-09-16 12:25:55 +02:00
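A sketch of the locking pattern on a cut-down pacer with one setting, using a plain sync.Mutex for the reason given above: accesses are too short for an RWMutex to pay off:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// safePacer guards its settings with a mutex so they can be changed
// safely while calls are in flight.
type safePacer struct {
	mu       sync.Mutex
	minSleep time.Duration
}

func (p *safePacer) SetMinSleep(d time.Duration) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.minSleep = d
}

func (p *safePacer) MinSleep() time.Duration {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.minSleep
}

func main() {
	p := &safePacer{minSleep: 20 * time.Millisecond}
	var wg sync.WaitGroup
	for i := 1; i <= 4; i++ {
		wg.Add(1)
		go func(i int) { // concurrent writers: safe under the mutex
			defer wg.Done()
			p.SetMinSleep(time.Duration(i) * time.Millisecond)
		}(i)
	}
	wg.Wait()
	fmt.Println(p.MinSleep())
}
```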
Nick Craig-Wood
34193fd8d9 Version v1.20 2015-09-15 07:42:31 +01:00
Nick Craig-Wood
2203766f77 Retry listing in tests to work around eventual consistency 2015-09-14 21:01:25 +01:00
Nick Craig-Wood
235cbe0e57 amazon cloud drive: retry 409 errors too 2015-09-14 21:00:44 +01:00
klauspost
f50f353b5d local: always use UNC paths on Windows - fixes #124, fixes #130, fixes #90
* Convert all paths to UNC paths on Windows.
  * Update local filesystem to always use UNC paths.
  * Change tests, so they can work with Windows character replacements.
  * Remove "/" suffix on paths.
  * Always use path/filepath
2015-09-14 19:58:03 +01:00
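Extended-length (`\\?\`) paths bypass the Windows MAX_PATH limit. A simplified sketch of the conversion; the function in the commit handles more cases (relative paths, already-converted paths, and so on):

```go
package main

import (
	"fmt"
	"strings"
)

// toUNC prefixes absolute Windows paths with `\\?\`, turning `C:\dir`
// into `\\?\C:\dir` and `\\server\share` into `\\?\UNC\server\share`.
func toUNC(path string) string {
	if strings.HasPrefix(path, `\\?\`) {
		return path // already extended-length
	}
	if strings.HasPrefix(path, `\\`) {
		return `\\?\UNC\` + path[2:] // network share
	}
	return `\\?\` + path // local drive path
}

func main() {
	fmt.Println(toUNC(`C:\data\file.txt`))
	fmt.Println(toUNC(`\\server\share\file.txt`))
}
```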
Klaus Post
00afe6cc96 ACD: Shorten minimum and maximum sleep. 2015-09-14 18:22:21 +01:00
Klaus Post
dd48e62b7e Limit concurrent directory listings.
Never run more than "fs.Config.Checkers" directory listings at once.
2015-09-14 18:11:59 +01:00
Klaus Post
a1a780e847 Read folders in separate goroutines.
As proposed in the FIXME, read folders in parallel.
This appears to fix "Next token is expired" on very big directories.
The only downside is that this doesn't abort at once if an error is found.
I added some logging, so there is some output for "-v".
2015-09-14 18:11:59 +01:00
Klaus Post
fa87077211 Retry if a timeout occurs.
This happens more rarely than 500, but can still be encountered.
2015-09-14 18:02:35 +01:00
Klaus Post
6ac7145d2d Retry when we get a "500 Internal server error".
It seems like this happens randomly, and retrying works fine for that.
2015-09-14 18:02:35 +01:00
Nick Craig-Wood
f1226f19b2 drive, googlecloudstorage: optionally use auto config for the oauth token 2015-09-12 14:17:39 +01:00
Klaus Post
3ecbf2af25 Fix missing link text. 2015-09-12 11:22:17 +02:00
Nick Craig-Wood
79f2e95bf9 Add Klaus Post to Contributors 2015-09-11 20:20:28 +01:00
klauspost
faee50b238 List multiple matched commands.
* When multiple commands match, list them all.
* Change example to an existing command.
2015-09-11 19:44:50 +01:00
klauspost
807d4a3c00 Improve OAUTH2 usability - fixes #131
Shorten the URL to be used by the user and automatically use the
returned code by the server. The browser is opened on
`http://(bindaddress)/auth`, and redirected to the actual URL. When
the code is returned it is automatically inserted, instead of
requiring a copy+paste.

This is also a workaround for the "open" package, which escapes "&"
wrongly on Windows, so the opened URLs are invalid.
2015-09-11 19:44:50 +01:00
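A sketch of the flow described: a local HTTP handler catches the provider's redirect and pulls out the `code` query parameter, so nothing needs copying by hand. The bind address and page text are illustrative:

```go
package main

import (
	"fmt"
	"net/http"
)

// waitForCode serves /auth on addr and blocks until the OAuth provider
// redirects back with ?code=..., which it returns.
func waitForCode(addr string) string {
	codeCh := make(chan string, 1)
	http.HandleFunc("/auth", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "Got code - you can close this window.")
		codeCh <- r.URL.Query().Get("code")
	})
	srv := &http.Server{Addr: addr}
	go srv.ListenAndServe() // error handling omitted in this sketch
	return <-codeCh
}

func main() {
	fmt.Println("Open http://127.0.0.1:53682/auth?code=test in a browser...")
	fmt.Println("code:", waitForCode("127.0.0.1:53682"))
}
```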
Nick Craig-Wood
073d112204 Factor pacer module from Drive and use it in Amazon Cloud Drive for
smooth API pacing and retry logic.
2015-09-11 19:18:41 +01:00
Nick Craig-Wood
14f814b806 acd: Retry on 429 errors with backoff and fix upload of 0 length files 2015-09-09 23:23:37 +01:00
Nick Craig-Wood
a288c2b3a3 Make a retry error wrapper for a plain error 2015-09-09 23:22:41 +01:00
Nick Craig-Wood
fec16b0ac8 acd: Skip test on FS which don't support ModifiedTime 2015-09-09 23:21:50 +01:00
Nick Craig-Wood
dd8717797e Implement --dump-headers and --dump-bodies debug flags 2015-09-08 21:02:48 +01:00
Nick Craig-Wood
7e7c239f09 Through popular demand add a donations page 2015-09-08 21:02:48 +01:00
Nick Craig-Wood
edd0e8abb1 Add Colin Nicholson to contributors 2015-09-08 21:02:48 +01:00
Colin Nicholson
d2b537d9a1 Fix docs link typo for googlecloudstorage - fixes #120 2015-09-08 21:02:48 +01:00
Nick Craig-Wood
8c3df224ef Implement Amazon Cloud Drive - fixes #45
* Optional interfaces Copier, Mover, DirMover not done
2015-09-08 21:02:48 +01:00
Nick Craig-Wood
967fd2a778 dircache: implement FindRootParent and unify locking 2015-09-06 10:27:15 +01:00
Nick Craig-Wood
ea12e446ca Factor DirCache from drive into its own module 2015-09-05 11:58:54 +01:00
Nick Craig-Wood
c8cd2b510f More notes 2015-09-02 19:17:33 +01:00
Nick Craig-Wood
8b05a8322b FAQ entry about bidirectional sync - see #118 2015-09-02 08:33:53 +01:00
Nick Craig-Wood
c98a51b26c Lightly obscure secrets 2015-09-01 22:33:34 +01:00
Nick Craig-Wood
e2717a031e Implement Mover and DirMover interfaces fixes #115
* unit tests
  * local
  * drive
  * dropbox
2015-09-01 21:49:13 +01:00
Nick Craig-Wood
8d33ce0154 Check for source and dest being the same in sync/copy/move 2015-09-01 21:49:13 +01:00
Nick Craig-Wood
92745aa950 Add Root() to Fs interface 2015-09-01 21:49:13 +01:00
Nick Craig-Wood
cbc6bf6a89 FAQ entry on partial transfers / binary diffs - fixes #113 2015-08-31 12:47:07 +01:00
Nick Craig-Wood
f72575e75f Remove support for go 1.2 now that go 1.5 is out 2015-08-29 18:58:48 +01:00
Nick Craig-Wood
0168f55f3e Switch to spf13 fork of pflag - fixes #116
This supports --long value as well as --long=value which is as
expected for a unix utility.
2015-08-29 18:14:24 +01:00
Nick Craig-Wood
8b60ab86a1 dropbox: force use of our custom transport which makes timeouts work 2015-08-29 17:48:15 +01:00
Nick Craig-Wood
7463a7a509 Use "golang.org/x/oauth2" as oauth library of choice - fixes #102
* get rid of deprecated "code.google.com/p/goauth2/oauth"
  * store tokens in config file as before
  * read old format tokens and write in new format seamlessly
  * set our own transport to enforce timeouts etc
2015-08-29 17:47:23 +01:00
Nick Craig-Wood
9ed2de3d6e Version v1.19 2015-08-28 09:47:13 +01:00
Nick Craig-Wood
4f35fb59c8 Build for plan9/amd64 and solaris/amd64 too 2015-08-28 09:40:46 +01:00
Nick Craig-Wood
59ba8f28c8 Implement move command - fixes #35
* Define Mover interface to move a single object
  * Define DirMover interface to move a directory
  * Implement DirMove operation
  * Add `rclone move` command
  * Tests for Dir Move

To Do
  * Implement Move, DirMover in local, drive, dropbox
  * unit test for Mover
  * unit test for DirMover
2015-08-28 08:49:16 +01:00
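Mover and DirMover follow rclone's optional-interface pattern: generic code type-asserts the remote and falls back (here, to an error standing in for copy+delete) when the interface isn't implemented. The types below are stand-ins, not rclone's real definitions:

```go
package main

import (
	"errors"
	"fmt"
)

// Object stands in for rclone's object type.
type Object interface{ Remote() string }

// Mover moves a single object server side.
type Mover interface {
	Move(src Object, remote string) (Object, error)
}

type obj string

func (o obj) Remote() string { return string(o) }

// driveFs is a toy remote that supports server-side move.
type driveFs struct{}

func (driveFs) Move(src Object, remote string) (Object, error) {
	fmt.Printf("server-side move %s -> %s\n", src.Remote(), remote)
	return obj(remote), nil
}

// moveObj uses the optional interface if present, else falls back.
func moveObj(f interface{}, src Object, remote string) (Object, error) {
	if m, ok := f.(Mover); ok {
		return m.Move(src, remote)
	}
	return nil, errors.New("fallback: copy then delete")
}

func main() {
	dst, err := moveObj(driveFs{}, obj("a/b.txt"), "c/b.txt")
	fmt.Println(dst, err)
}
```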
Nick Craig-Wood
d298b578ab s3: Fix after upstream API changes in aws-sdk-go/aws - fixes #114 2015-08-28 08:47:41 +01:00
Nick Craig-Wood
fabbc035c4 Make a current version download with a fixed URL for scripting - fixes #106 2015-08-27 20:11:11 +01:00
Nick Craig-Wood
6530b07cde FAQ entry about copying the config file 2015-08-27 19:46:28 +01:00
Nick Craig-Wood
f8b7eaec93 s3: Document cross region bucket limitations - fixes #105 2015-08-25 20:15:50 +01:00
Nick Craig-Wood
5c226e91c0 Ignore rmdir in limited fs rather than throwing error - fixes #112 2015-08-25 19:16:25 +01:00
Nick Craig-Wood
8e3d45d2dc dropbox: increase chunk size to improve upload speeds - fixes #103
Chunks aren't buffered in memory, so 128M was chosen as the default
size as it produces the maximum throughput.  This takes the throughput
from 78 kBytes/s to 4 MBytes/s, a 50x improvement!
2015-08-25 19:01:37 +01:00
Nick Craig-Wood
a96b522958 Implement server side copies if possible - fixes #99
Add optional fs.Copier interface

Implemented for
  * swift
  * s3
  * drive
  * dropbox
  * google cloud storage
2015-08-23 21:18:38 +01:00
Nick Craig-Wood
fedf81c2b7 Add Name() to Fs interface to return name as passed to NewFs 2015-08-23 13:36:38 +01:00
Nick Craig-Wood
0c6f816a49 Implement --retries flag - fixes #109 2015-08-20 21:07:00 +01:00
Nick Craig-Wood
dfe771fb0c Correct log messages for remotes which don't support modtime/md5sum 2015-08-20 20:48:58 +01:00
Nick Craig-Wood
bc19e2d84b dropbox: Issue an error message when trying to upload bad file name - fixes #108 2015-08-20 20:46:35 +01:00
Nick Craig-Wood
8c4d91cff7 Add privacy policy to the website 2015-08-19 22:10:04 +01:00
Nick Craig-Wood
2fcc18779b Version v1.18 2015-08-17 17:59:37 +01:00
Nick Craig-Wood
96cc3e5a0b Add FAQ to menu and documentation fixes 2015-08-17 17:26:37 +01:00
Leonid Shalupov
cc8fe0630c Change email of Leonid Shalupov - fixes #94 2015-08-17 17:26:36 +01:00
Nick Craig-Wood
1d9e76bb0f dropbox: remove datastore - Fixes #55 #84
This means that dropbox no longer stores MD5SUMs and modified times.

Fixup the tests so that blank MD5SUMs are ignored, and that if
Precision is set to fs.ModTimeNotSupported, ModTimes can be ignored too.

This opens the door for other FSs which don't support metadata easily.
2015-08-17 17:26:36 +01:00
Nick Craig-Wood
337110b7a0 s3: remove verbose debug about invalid md5sums for multipart upload 2015-08-16 18:14:22 +01:00
Nick Craig-Wood
83733205f6 Fix favicon - fixes #79 2015-08-16 15:51:39 +01:00
Nick Craig-Wood
d8306938a1 drive: Add --drive-use-trash flag so rclone trashes instead of deletes - fixes #82 2015-08-16 15:51:39 +01:00
Nick Craig-Wood
88ea8b305d drive: Add "Forbidden to download" message for files with no downloadURL - fixes #95 2015-08-16 15:51:39 +01:00
Nick Craig-Wood
e2f4d7b5e3 FAQ entry about using rclone from multiple places - fixes #78 2015-08-16 15:51:39 +01:00
Nick Craig-Wood
8140869767 s3: Update documentation after transition to new SDK - also fixes #47 2015-08-16 15:51:38 +01:00
Nick Craig-Wood
6a8de87116 s3: make v2 signatures work for ceph 2015-08-16 15:51:38 +01:00
Nick Craig-Wood
0da6f24221 s3: use official github.com/aws/aws-sdk-go including multipart upload - fixes #101 2015-08-16 15:51:04 +01:00
Nick Craig-Wood
771e60bd07 Show errors when reading the config file 2015-08-15 17:15:02 +01:00
Nick Craig-Wood
40b3c4883f testing: add -remote flag to allow unit tests to run on other remotes
Use this on the individual fs unit tests, eg

    cd s3
    go test -v -remote mytestS3:
2015-08-15 15:40:18 +01:00
Nick Craig-Wood
e320f4a988 Add Shimon Doodkin to contributors 2015-08-10 11:25:21 +01:00
Shimon Doodkin
5835f15f21 linux installation instructions binary download 2015-08-10 11:22:34 +01:00
Leonid Shalupov
67c311233b Run travis builds on container-based infrastructure
See http://docs.travis-ci.com/user/migrating-from-legacy/
TL;DR It starts much faster
2015-08-10 11:17:54 +01:00
Leonid Shalupov
3fcff32524 do not print stats in quiet mode - fixes #70
...unless there were errors or a stats interval was requested.

Add fs.ErrorLog to differentiate between Logs which should be
suppressed and errors which shouldn't.
2015-08-10 11:17:54 +01:00
Nick Craig-Wood
472f065ce7 Make sizes in Dropbox 64 bit - fixes #80 2015-08-10 11:17:54 +01:00
Nick Craig-Wood
6c6d7eb770 Drop support for Go 1.1 2015-08-10 11:17:54 +01:00
Nick Craig-Wood
c646ada3c3 docs: add FAQ and update docs 2015-08-10 11:17:43 +01:00
Nick Craig-Wood
f55359b3ac Fix created directories not obeying umask 2015-07-29 10:21:12 +01:00
Nick Craig-Wood
9d9a17547a Version v1.17 2015-06-14 15:36:16 +01:00
Nick Craig-Wood
c6dc88766b Add Leonid Shalupov to contributors 2015-06-14 15:32:16 +01:00
Leonid Shalupov
754ce9dec6 dropbox: record names coming from dropbox API - fixes #53 (case insensitivity causes duplicated files) 2015-06-14 15:23:56 +01:00
Leonid Shalupov
bd5f685d0a fix TestLsLong on non-UTC timezones 2015-06-14 15:23:56 +01:00
Nick Craig-Wood
c663e24669 Note how to update rclone dependencies in the install documentation 2015-06-14 15:04:39 +01:00
Nick Craig-Wood
5948764e9e Link to changelog 2015-06-14 15:04:29 +01:00
Nick Craig-Wood
539ad44757 Version v1.16 2015-06-09 18:00:33 +01:00
Nick Craig-Wood
74994a2ec1 Fix uploading big files which was causing timeouts or panics
The symptom was one of these two errors, seen only when uploading files
  * panic: d.nx != 0 in crypto/md5.(*digest).checkSum
  * read tcp: i/o timeout

It turned out to be a combination of two upstream bugs

  * 5a2187309e
  * https://groups.google.com/forum/#!topic/golang-dev/0Nl6k5Sj6UU

This commit contains a work-around for the second problem, I've fixed
the first and had the change accepted upstream.
2015-06-09 17:32:45 +01:00
Nick Craig-Wood
97dced6a0b Don't check md5sum after download with --size-only - fixes #75 2015-06-09 13:18:40 +01:00
Nick Craig-Wood
e04acb09ce Version v1.15 2015-06-06 15:45:00 +01:00
Nick Craig-Wood
87ed7fc932 Document rclone's limitations with directories - fixes #69 2015-06-06 14:33:08 +01:00
Nick Craig-Wood
90744301d3 Fix package docs so they appear in godoc correctly 2015-06-06 14:24:30 +01:00
Nick Craig-Wood
bf4879f57f Expand docs and remove duplicated information 2015-06-06 14:17:14 +01:00
Nick Craig-Wood
e22b445cff Implement --size-only flag to sync on size not checksum & modtime - fixes #75 2015-06-06 08:49:01 +01:00
Nick Craig-Wood
5ab7970e18 dropbox: update docs about case insensitivity - see #53 2015-06-06 08:44:09 +01:00
Nick Craig-Wood
e984eeedc4 Add Alex Couper to contributors 2015-06-05 20:12:56 +01:00
Nick Craig-Wood
968b5a0984 Update notes 2015-06-05 20:00:36 +01:00
Alex Couper
7af1282375 Add --checksum flag to only discard transfers by MD5SUM - fixes #61
Useful for copying between backends where checksum fetching is fast,
ie any of s3, swift, drive or googlecloudstorage.
2015-06-05 19:46:03 +01:00
Nick Craig-Wood
d9fcc32f70 Version v1.14 2015-05-21 20:13:40 +01:00
Nick Craig-Wood
870a9fc3b2 local: fix encoding of non utf-8 file names - fixes #66 2015-05-21 18:40:16 +01:00
Nick Craig-Wood
8e3703abeb drive: docs about rate limiting 2015-05-21 18:39:46 +01:00
Nick Craig-Wood
ba81277bbe google cloud storage: Fix compile after API change in "google.golang.org/api/storage/v1"
The breaking change is: google-api-go-generator: remove underscores from identifiers.
2015-05-19 08:18:26 +01:00
Nick Craig-Wood
88293a4b8a Version v1.13 2015-05-10 12:39:06 +01:00
Nick Craig-Wood
981104519e Revise documentation (especially sync) - fixes #39 2015-05-10 12:17:24 +01:00
Nick Craig-Wood
1d254a3674 Implement --timeout and --conntimeout - fixes #54
NB dropbox still to do
2015-05-10 11:29:55 +01:00
Nick Craig-Wood
f88d171afd s3: ignore etags from multipart uploads which aren't md5sums - fixes #56 2015-05-10 11:29:55 +01:00
Nick Craig-Wood
ba2091725e Version v1.12 2015-03-15 15:55:38 +00:00
Nick Craig-Wood
7c120b8bc5 drive: add --drive-chunk-size and --drive-upload-cutoff parameters 2015-03-15 15:27:55 +00:00
Nick Craig-Wood
5cc5429f99 drive: switch to insert from update when a failed copy deletes the upload 2015-03-15 15:27:55 +00:00
Nick Craig-Wood
09d71239b6 Make file size render more neatly and prevent it from being < 0 2015-03-15 15:27:55 +00:00
Nick Craig-Wood
c643e4585e core: Log duplicate files if they are detected 2015-03-15 15:27:55 +00:00
Nick Craig-Wood
873db29391 Log all objects more informatively 2015-03-15 15:27:55 +00:00
Nick Craig-Wood
81a933ae38 drive: Use chunked upload for files - fixes #33 2015-03-15 15:27:55 +00:00
Nick Craig-Wood
ecb3c7bcef drive, googlecloudstorage: remove SeekWrapper after googleapi fix 2015-03-04 20:47:59 +00:00
Nick Craig-Wood
80000b904c Version v1.11 2015-03-04 17:59:31 +00:00
Nick Craig-Wood
c47c9cd440 swift: add region parameter - fixes #38 2015-03-04 17:09:53 +00:00
Nick Craig-Wood
b4a0941d4c In remote paths, change native directory separators to / - fixes #37 2015-03-02 17:04:34 +00:00
Nick Craig-Wood
c03d6a1ec3 drive: fix crash on failed to update remote mtime - fixes #36 2015-03-02 09:25:33 +00:00
Nick Craig-Wood
46d39ebaf7 Factor Mime Type guessing into fs.MimeType() 2015-03-02 09:21:15 +00:00
Nick Craig-Wood
fe68737268 Fix niggles found by go vet 2015-02-28 15:35:54 +00:00
Nick Craig-Wood
2360bf907a Add synchronization to list output to stop corruptions - fixes #29 2015-02-28 15:30:40 +00:00
Nick Craig-Wood
aa093e991e Ensure all stats/log messages to go stderr - fixes #30 2015-02-28 14:39:00 +00:00
Nick Craig-Wood
a5974999eb Update docs - fixes #32 2015-02-28 14:15:47 +00:00
Nick Craig-Wood
24a6ff54c2 Add --log-file flag to log everything (including panics) to file 2015-02-28 08:10:20 +00:00
Nick Craig-Wood
e89ea3360e Make it possible to disable stats printing with --stats=0 2015-02-27 15:22:26 +00:00
Nick Craig-Wood
85f8552c4d Tidy logging 2015-02-27 15:22:05 +00:00
Nick Craig-Wood
a287e3ced7 Implement --bwlimit to limit data transfer bandwidth 2015-02-27 15:03:47 +00:00
Nick Craig-Wood
8e4d8d13b8 drive: rename internal api 2015-02-20 09:51:07 +00:00
Nick Craig-Wood
cf208ad21b Version v1.10 2015-02-12 18:00:20 +00:00
Nick Craig-Wood
0faed16899 s3: list an unlimited number of items - fixes #22 2015-02-10 17:58:29 +00:00
Nick Craig-Wood
8d1c0ad07c Fix config loop - fixes #25 2015-02-10 16:48:04 +00:00
Nick Craig-Wood
165e89c266 Version v1.09 2015-02-07 22:44:23 +00:00
Nick Craig-Wood
b4e19cfd62 windows: make tests work properly 2015-02-07 22:32:51 +00:00
Nick Craig-Wood
20ad96f3cd windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
This was done by stopping the user from configuring single letter remotes
and making sure we don't treat single letter remotes as a remote name,
but as a drive letter.
2015-02-07 22:32:51 +00:00
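A sketch of the disambiguation rule: a single-letter name before ":" is taken as a Windows drive letter, anything longer as a remote name. Simplified relative to rclone's real parser:

```go
package main

import "fmt"

// isDriveLetter reports whether a name before ":" should be treated as a
// Windows drive letter rather than a remote name: exactly one ASCII letter.
func isDriveLetter(name string) bool {
	if len(name) != 1 {
		return false
	}
	c := name[0]
	return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
}

func main() {
	fmt.Println(isDriveLetter("C"))     // true:  "C:\path" is a local path
	fmt.Println(isDriveLetter("drive")) // false: "drive:path" is a remote
}
```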
Nick Craig-Wood
d64a37772f local: Fix directory separators on Windows - fixes #24 2015-02-07 22:32:51 +00:00
Nick Craig-Wood
5fb6f94579 drive: fix rate limit exceeded errors - fixes #20
This is done by pacing the requests to drive and backing them off
using an exponential increase.  Put and Modify operations can now be
retried also.
2015-02-07 22:32:51 +00:00
Nick Craig-Wood
20535348db Update docs to remove obsolete bug - fixes #21 2015-02-07 22:32:51 +00:00
Nick Craig-Wood
3d83a265c5 Update notes 2015-02-05 22:44:02 +00:00
Nick Craig-Wood
18a8a61cc5 Release v1.08 2015-02-04 22:31:56 +00:00
Nick Craig-Wood
1758621a51 drive: fix subdirectory listing to not list entire drive - fixes #23
This was causing inexplicably slow transfers on subdirectories of
drives with lots of files.
2015-02-04 22:22:03 +00:00
Nick Craig-Wood
5710247bf6 drive: Fix SetModTime 2015-01-04 23:19:59 +00:00
Nick Craig-Wood
78b03929b7 Fix ModTime test 2015-01-04 16:57:55 +00:00
Nick Craig-Wood
492362ec7d Catch nil from List() in tests 2015-01-04 16:23:22 +00:00
Nick Craig-Wood
51b24a1dc6 dropbox: adapt code to recent library changes 2014-12-23 13:55:22 +00:00
Nick Craig-Wood
cfdb48c864 Version v1.07 2014-12-23 11:26:32 +00:00
Nick Craig-Wood
14567952b3 google cloud storage: Fix memory leak - fixes #17
This was the same problem as issue #5 (which affected google drive)
2014-12-23 11:03:34 +00:00
Nick Craig-Wood
2b052671e2 swift: Add docs for tenant 2014-12-12 20:38:35 +00:00
Nick Craig-Wood
439a126af6 Version v1.06 2014-12-12 20:13:03 +00:00
Nick Craig-Wood
0fb35f081a Use new location of Google API package - fixes #16 2014-12-12 20:02:08 +00:00
Nick Craig-Wood
9ba25c7219 Test with go 1.4 too 2014-12-12 19:27:14 +00:00
Nick Craig-Wood
af9c447146 Fix "Couldn't find home directory" on OSX - fixes #15 2014-12-12 19:18:23 +00:00
Nick Craig-Wood
ee6b39aa6c Add tenant parameter for swift - fixes #13 2014-12-12 15:26:08 +00:00
Nick Craig-Wood
839133c5e1 Version v1.05 2014-08-09 17:22:17 +01:00
Nick Craig-Wood
f4eb48e531 Fix test incantation 2014-08-09 17:18:17 +01:00
Nick Craig-Wood
18439cf2d7 Move rclonetest into go tests for fs module 2014-08-03 11:18:25 +01:00
Nick Craig-Wood
d3c16608e4 Test Listing the Root of each Fs 2014-07-31 23:20:39 +01:00
Nick Craig-Wood
3e27ff1b95 Add Root List test and fs.Limited tests for single files 2014-07-31 21:35:29 +01:00
Nick Craig-Wood
ff91698fb5 Skip tests if test remote not configured 2014-07-31 08:51:39 +01:00
Nick Craig-Wood
c389616657 all: make private functions / variables / constant which shouldn't be public 2014-07-29 17:50:07 +01:00
Nick Craig-Wood
442578ca25 drive: reset root directory on Rmdir and Purge 2014-07-29 17:32:06 +01:00
Nick Craig-Wood
0b51d6221a s3: make reading metadata more reliable to work around eventual consistency problems 2014-07-29 17:32:06 +01:00
Nick Craig-Wood
2f9f9afac2 fs: Document that Purger returns error on empty directory, test and fix 2014-07-29 17:18:22 +01:00
Nick Craig-Wood
9711a5d647 google cloud storage: re-read metadata in SetModTime 2014-07-29 17:18:22 +01:00
Nick Craig-Wood
cc679aa714 google cloud storage: fix ListDir on subdirectory 2014-07-29 17:18:22 +01:00
Nick Craig-Wood
457ef2c190 Automatically generate the tests files for each Fs 2014-07-29 17:18:22 +01:00
Nick Craig-Wood
17ffb0855f Fixes after running errcheck 2014-07-25 18:19:49 +01:00
Nick Craig-Wood
125fc8f1f0 s3: strip trailing / from ListDir() 2014-07-24 23:13:33 +01:00
Nick Craig-Wood
1660903aa2 local: fix unit tests
* Change log.Printf into fs.Log
  * Re-read metadata on SetModtime
2014-07-24 23:13:33 +01:00
Nick Craig-Wood
b013c58537 swift: return directories without / in ListDir 2014-07-24 23:13:33 +01:00
Nick Craig-Wood
a5b0d88608 Make tests for each Fs
Factor tests out of rclonetest
2014-07-24 23:13:32 +01:00
Nick Craig-Wood
02d50f8c6e local: remove annoying debug message 2014-07-22 23:06:01 +01:00
Nick Craig-Wood
e09ef62d5b core: Fix race detected by go race detector 2014-07-22 23:03:14 +01:00
Nick Craig-Wood
a75bc0703f Version 1.04 2014-07-21 21:32:37 +01:00
Nick Craig-Wood
80ecea82e8 google cloud storage: Fix crash on Update error - fixes #9 2014-07-21 21:25:46 +01:00
Nick Craig-Wood
54cd46372a Version 1.03 2014-07-20 11:28:50 +01:00
Nick Craig-Wood
282cba20a0 swift, s3, dropbox: fix metadata read on Update()
This was causing changed files to be marked as corrupted on upload
2014-07-20 11:23:05 +01:00
Nick Craig-Wood
2479ce2c8e dropbox: go1.1 compatibility 2014-07-19 15:48:40 +01:00
Nick Craig-Wood
9aa4b6bd9b Version 1.02 2014-07-19 13:24:48 +01:00
Nick Craig-Wood
6c10024420 rclonetest: add --subdir flag for testing with a sub directory
Also add a test script for testing all the remotes
2014-07-19 13:07:56 +01:00
Nick Craig-Wood
e559194fb2 fs: Verify sizes are the same after transfer in Copy() 2014-07-19 13:05:07 +01:00
Nick Craig-Wood
1c472348b6 s3: Read metadata after Update or Put 2014-07-19 13:05:07 +01:00
Nick Craig-Wood
5a8bce6353 swift: Read metadata after Update or Put 2014-07-19 13:05:06 +01:00
Nick Craig-Wood
f9b31591f9 drive: Flush directory cache on Purge 2014-07-19 13:05:06 +01:00
Nick Craig-Wood
1527e64ee7 local: Implement Purger interface 2014-07-19 13:05:01 +01:00
Nick Craig-Wood
f7652db4f1 local: Make sure info is never nil 2014-07-19 11:50:11 +01:00
Nick Craig-Wood
8b75fb14c5 local: calculate md5sum on Read or Update since we check it in Copy() 2014-07-19 11:06:25 +01:00
Nick Craig-Wood
07f9a1a9f0 Update docs for Dropbox and Google Cloud Storage 2014-07-18 10:10:08 +01:00
Nick Craig-Wood
7d8bac2711 google cloud storage: fix merge conflict
Conflicts:
	rclone.go
	rclonetest/rclonetest.go
2014-07-16 12:21:01 +01:00
Nick Craig-Wood
cad9479a00 google cloud storage: Update metadata on Put since we get it back 2014-07-16 12:12:36 +01:00
Nick Craig-Wood
dfc8a375f6 dropbox: Switch to using RFC3339 for time metadata 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
7c9bdb4b7a dropbox: make limited fs work (copy single file) 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
f8bb0d9cc8 dropbox: remove metadata when we remove files 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
b185e104ed dropbox: Fix mkdir on already created directory 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
e57a4c7c0c dropbox: open the datastore in the background 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
d2f187e1a1 dropbox: Use /delta to list objects - much quicker
Also fix major performance problem - re-reading entry each time!
2014-07-15 19:27:42 +01:00
Nick Craig-Wood
c9aca33030 dropbox: Fix concurrent access to Dropbox datastore and Lower case keys in datastore 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
2b0911531c dropbox: basics of metadata in Dropbox datastore working 2014-07-15 19:27:42 +01:00
Nick Craig-Wood
2149185fc2 dropbox: Initial support of full Fs interface
Still missing metadata support (eg SetModTime)
2014-07-15 19:27:42 +01:00
Nick Craig-Wood
0159da9f37 dropbox: graphics for the Dropbox app (used in auth process) 2014-07-15 19:27:41 +01:00
Nick Craig-Wood
680283d69f google cloud storage: fix download of files in sub directories 2014-07-15 19:27:31 +01:00
Nick Craig-Wood
c71f339e01 google cloud storage: implement ACLs and delete 2014-07-15 19:27:31 +01:00
Nick Craig-Wood
c91c96565f google cloud storage: set the Content-Type from the file name 2014-07-15 19:27:31 +01:00
Nick Craig-Wood
b72fc69fbe google cloud storage: Make operations on single files work 2014-07-15 19:27:30 +01:00
Nick Craig-Wood
a1732c21d8 google cloud storage: Initial support for full Fs interface 2014-07-15 19:27:30 +01:00
Nick Craig-Wood
b83441081c drive: factor common authentication code into googleauth module
In preparation for Google Cloud Storage support
2014-07-15 19:27:30 +01:00
Nick Craig-Wood
8a76568ea8 core: Verify MD5 sums after each transfer 2014-07-15 19:27:05 +01:00
Nick Craig-Wood
c4dc9d273a rclonetest: check sub directory and downloads 2014-07-15 13:28:48 +01:00
Nick Craig-Wood
66cf2df780 drive: check errors in Open() better 2014-07-15 13:28:35 +01:00
Nick Craig-Wood
c1a245d1c8 Factor UserAgent to fs and move Version to fs 2014-07-13 19:19:58 +01:00
Nick Craig-Wood
e40b09fe61 drive: Fix comment 2014-07-13 10:54:35 +01:00
Nick Craig-Wood
eb2b4ea8aa rclone: Don't purge if --dry-run set 2014-07-13 10:54:30 +01:00
Nick Craig-Wood
e055ed0489 rclone: change "ls" and add "lsl" and "md5sum" commands
Changed "ls" command not to show modification time by default only
size and path.  That is because it is slow for nearly all the remotes
as it requires extra metadata lookup.  All remotes can look up files
and sizes without extra operations.

Added "lsl" which does what "ls" used to - namely show modification
time, size and path.

Added "md5sum" which produces the same output as the md5sum command -
md5sums and paths that is.
2014-07-12 12:09:20 +01:00
Nick Craig-Wood
dd6d7cad3a Notes about storage systems 2014-07-09 20:50:08 +01:00
Nick Craig-Wood
37b2274e10 Version 1.01 2014-07-04 21:15:27 +01:00
Nick Craig-Wood
91cfbd4146 drive: fix transfer of big files using up lots of memory - fixes #5
This was done by making a seekWrapper which wraps an io.Reader with a
basic Seek for code.google.com/p/google-api-go-client/googleapi to
detect the length.  Without this the getReaderSize function reads the
entire file into memory to find its length.
2014-07-04 17:17:21 +01:00
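The fix relies on googleapi probing the length with Seek (to the end, then back to the start) before any read. A sketch of a wrapper supporting exactly that probe for a reader of known size; this is not the original seekWrapper code:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// seekWrapper wraps an io.Reader whose size is already known with just
// enough Seek support that a size-probing consumer learns the length
// without the whole stream being buffered in memory.
type seekWrapper struct {
	in   io.Reader
	size int64
}

// Seek supports only the probe pattern: seek to end, rewind to start.
func (s *seekWrapper) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekEnd:
		return s.size + offset, nil
	case io.SeekStart:
		if offset == 0 {
			return 0, nil
		}
	}
	return 0, fmt.Errorf("seekWrapper: unsupported seek")
}

func (s *seekWrapper) Read(p []byte) (int, error) { return s.in.Read(p) }

func main() {
	r := &seekWrapper{in: strings.NewReader("payload"), size: 7}
	end, _ := r.Seek(0, io.SeekEnd)
	r.Seek(0, io.SeekStart)
	fmt.Println("probed size:", end)
	b, _ := io.ReadAll(r)
	fmt.Println(string(b))
}
```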
Nick Craig-Wood
d4817399ff Fix release procedure 2014-07-03 22:01:25 +01:00
Nick Craig-Wood
48d259da68 Version 1.00 2014-07-03 21:56:54 +01:00
Nick Craig-Wood
f86fa6a062 Fix make tag 2014-07-03 21:43:14 +01:00
Nick Craig-Wood
93cb0a47e4 drive: fix whole second dates - fixes #4 2014-07-03 21:32:01 +01:00
Nick Craig-Wood
a12760c038 Travis build correction - fixes #2 2014-06-26 19:05:48 +01:00
Nick Craig-Wood
fdcd6a3a4c Add travis build file 2014-06-26 18:23:54 +01:00
Nick Craig-Wood
cb7891f4ff Fix retag 2014-06-26 17:57:32 +01:00
Nick Craig-Wood
8f2684fa70 Version 0.99 2014-06-26 17:49:26 +01:00
Nick Craig-Wood
7ebf48ef42 Fix --dry-run not working and add tests for it - fixes #3 2014-06-26 15:33:06 +01:00
Nick Craig-Wood
1d67b014cb Make compatible with go 1.1 - fixes #1 2014-06-26 15:18:48 +01:00
105 changed files with 15336 additions and 2443 deletions

.gitignore (5 changes)

@@ -4,6 +4,7 @@ rclone
rclonetest/rclonetest
build
docs/public
README.html
README.txt
MANUAL.md
MANUAL.html
MANUAL.txt
rclone.1

.travis.yml (new file, 24 lines)

@@ -0,0 +1,24 @@
language: go
sudo: false
os:
- linux
- osx
go:
- 1.3.3
- 1.4.2
- 1.5
- tip
install:
- go get ./...
- go get -u github.com/golang/lint/golint
- go get -u golang.org/x/tools/cmd/goimports
script:
- go vet ./...
- diff <(goimports -d .) <(printf "")
- diff <(golint ./...) <(printf "")
- go test -v ./...
- go test -cpu=2 -race -v ./...

Makefile

@@ -1,40 +1,54 @@
SHELL = /bin/bash
TAG := $(shell git describe --tags)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = "v" . $$_')
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
rclone: *.go */*.go
rclone:
@go version
go build
go install -v ./...
doc: rclone.1 README.html README.txt
test: rclone
go test ./...
cd fs && ./test_all.sh
rclone.1: README.md
pandoc -s --from markdown --to man README.md -o rclone.1
check: rclone
go vet ./...
errcheck ./...
golint ./...
diff <(goimports -d .) <(printf "")
README.html: README.md
pandoc -s --from markdown_github --to html README.md -o README.html
doc: rclone.1 MANUAL.html MANUAL.txt
README.txt: README.md
pandoc -s --from markdown_github --to plain README.md -o README.txt
rclone.1: MANUAL.md
pandoc -s --from markdown --to man MANUAL.md -o rclone.1
MANUAL.md: make_manual.py docs/content/*.md
./make_manual.py
MANUAL.html: MANUAL.md
pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
install: rclone
install -d ${DESTDIR}/usr/bin
install -t ${DESTDIR}/usr/bin rclone
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
clean:
go clean ./...
find . -name \*~ | xargs -r rm -f
rm -rf build docs/public
rm -f rclone rclonetest/rclonetest rclone.1 README.html README.txt
rm -f rclone rclonetest/rclonetest rclone.1 MANUAL.md MANUAL.html MANUAL.txt
website:
cd docs && hugo
upload_website: website
./rclone -v sync docs/public memstore:www-rclone-org
rclone -v sync docs/public memstore:www-rclone-org
upload:
./rclone -v copy build/ memstore:downloads-rclone-org
rclone -v copy build/ memstore:downloads-rclone-org
cross: doc
./cross-compile $(TAG)
@@ -45,17 +59,18 @@ serve:
tag:
@echo "Old tag is $(LAST_TAG)"
@echo "New tag is $(NEW_TAG)"
echo -e "package main\n const Version = \"$(NEW_TAG)\"\n" | gofmt > version.go
cp -av version.go rclonetest/version.go
echo -e "package fs\n\n// Version of rclone\nconst Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
git tag $(NEW_TAG)
@echo "Add this to changelog in README.md"
@echo " * $(NEW_TAG) - " `date -I`
@echo "Add this to changelog in docs/content/changelog.md"
@echo " * $(NEW_TAG) -" `date -I`
@git log $(LAST_TAG)..$(NEW_TAG) --oneline
@echo "Then commit the changes"
@echo git commit -m "Version $(NEW_TAG)" -a -v
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo "And finally run make retag before make cross etc"
retag:
echo git tag -f $(LAST_TAG)
git tag -f $(LAST_TAG)
gen_tests:
cd fstest/fstests && go generate

README.md (252 changes)

@@ -1,17 +1,22 @@
% rclone(1) User Manual
% Nick Craig-Wood
% Apr 24, 2014
Rclone
======
[![Logo](http://rclone.org/img/rclone-120x120.png)](http://rclone.org/)
[Website](http://rclone.org) |
[Documentation](http://rclone.org/docs/) |
[Changelog](http://rclone.org/changelog/) |
[Installation](http://rclone.org/install/) |
[G+](https://google.com/+RcloneOrg)
[![Build Status](https://travis-ci.org/ncw/rclone.png?branch=master)](https://travis-ci.org/ncw/rclone) [![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/ncw/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/ncw/rclone) [![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
Rclone is a command line program to sync files and directories to and from
* Google Drive
* Amazon S3
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* Amazon Cloud Drive
* The local filesystem
Features
@@ -24,242 +29,13 @@ Features
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts
See the Home page for more documentation and configuration walkthroughs.
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
* http://rclone.org/
Install
-------
Rclone is a Go program and comes as a single binary file.
Download the binary for your OS from
* http://rclone.org/downloads/
Or alternatively if you have Go installed use
go install github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`.
Configure
---------
First you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file
`.rclone.conf` in your home directory by default. (You can use the
`--config` option to choose a different config file.)
The easiest way to make the config is to run rclone with the config
option, Eg
rclone config
Usage
-----
Rclone syncs a directory tree from local to remote.
Its basic syntax is
Syntax: [options] subcommand <parameters> <parameters...>
See below for how to specify the source and destination paths.
Subcommands
-----------
rclone copy source:path dest:path
Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.
rclone sync source:path dest:path
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the `--dry-run` flag.
rclone ls [remote:path]
List all the objects in the path.
rclone lsd [remote:path]
List all directories/objects/buckets in the path.
rclone mkdir remote:path
Make the path if it doesn't already exist
rclone rmdir remote:path
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.
rclone purge remote:path
Remove the path and all of its contents.
rclone check source:path dest:path
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
General options:
```
--checkers=8: Number of checkers to run in parallel.
--config="~/.rclone.conf": Config file.
-n, --dry-run=false: Do a trial run with no permanent changes
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats
--transfers=4: Number of file transfers to run in parallel.
-v, --verbose=false: Print lots more stuff
```
Developer options:
```
--cpuprofile="": Write cpu profile to file
```
Local Filesystem
----------------
Paths are specified as normal filesystem paths, so
rclone sync /home/source /tmp/destination
Will sync `/home/source` to `/tmp/destination`
Swift / Rackspace cloudfiles / Memset Memstore
----------------------------------------------
Paths are specified as remote:container (or remote: for the `lsd`
command.) You may put subdirectories in too, eg
`remote:container/path/to/dir`.
So to copy a local directory to a swift container called backup:
rclone sync /home/source swift:backup
The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point since the epoch.
This is a de facto standard (used in the official python-swiftclient
amongst others) for storing the modification time (as read using
os.Stat) for an object.
Amazon S3
---------
Paths are specified as remote:bucket. You may put subdirectories in
too, eg `remote:bucket/path/to/dir`.
So to copy a local directory to an s3 bucket called backup
rclone sync /home/source s3:backup
The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch.
Google drive
------------
Paths are specified as drive:path Drive paths may be as deep as required.
The initial setup for drive involves getting a token from Google drive
which you need to do in your browser. `rclone config` walks you
through it.
To copy a local directory to a drive directory called backup
rclone copy /home/source drv:backup
Google drive stores modification times accurate to 1 ms.
Single file copies
------------------
Rclone can copy single files
rclone src:path/to/file dst:path/dir
Or
rclone src:path/to/file dst:path/to/file
Note that you can't rename the file if you are copying from one file to another.
License
-------
This is free software under the terms of the MIT license (check the
COPYING file included in this package).
Bugs
----
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work
Changelog
---------
* v0.98 - 2014-05-30
* s3: Treat missing Content-Length as 0 for some ceph installations
* rclonetest: add file with a space in
* v0.97 - 2014-05-05
* Implement copying of single files
* s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
* drive: Fix multiple files of same name being created
* drive: Use o.Update and fs.Put to optimise transfers
* Add version number, -V and --version
* v0.95 - 2014-03-28
* rclone.org: website, docs and graphics
* drive: fix path parsing
* v0.94 - 2014-03-27
* Change remote format one last time
* GNU style flags
* v0.93 - 2014-03-16
* drive: store token in config file
* cross compile other versions
* set strict permissions on config file
* v0.92 - 2014-03-15
* Config fixes and --config option
* v0.91 - 2014-03-15
* Make config file
* v0.90 - 2013-06-27
* Project named rclone
* v0.00 - 2012-11-18
* Project started
Contact and support
-------------------
The project website is at:
* https://github.com/ncw/rclone
There you can file bug reports, ask for help or send pull requests.
Authors
-------
* Nick Craig-Wood <nick@craig-wood.com>
Contributors
------------
* Your name goes here!

RELEASE.md (new file, 22 lines)

@@ -0,0 +1,22 @@
Required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* [gox](https://github.com/mitchellh/gox) for cross compiling
* Run `gox -build-toolchain`
* This assumes you have your own source checkout
* pandoc for making the html and man pages
* errcheck - go get github.com/kisielk/errcheck
* golint - go get github.com/golang/lint
Making a release
* go get -u -f -v ./...
* make check
* make test
* make tag
* edit docs/content/changelog.md
* git commit -a -v
* make retag
* # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross - not required for go >= 1.5
* make cross
* make upload
* make upload_website
* git push --tags origin master

amazonclouddrive/amazonclouddrive.go (new file, 706 lines)

@@ -0,0 +1,706 @@
// Package amazonclouddrive provides an interface to the Amazon Cloud
// Drive object storage system.
package amazonclouddrive
/*
FIXME make searching for directory in id and file in id more efficient
- use the name: search parameter - remember the escaping rules
- use Folder GetNode and GetFile
FIXME make the default for no files and no dirs be (FILE & FOLDER) so
we ignore assets completely!
*/
import (
"fmt"
"io"
"log"
"net/http"
"regexp"
"strings"
"sync"
"time"
"github.com/ncw/go-acd"
"github.com/ncw/rclone/dircache"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/pacer"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "amzn1.application-oa2-client.6bf18d2d1f5b485c94c8988bb03ad0e7"
rcloneClientSecret = "k8/NyszKm5vEkZXAwsbGkd6C3NrbjIqMg4qEhIeF14Szub2wur+/teS3ubXgsLe9//+tr/qoqK+lq6mg8vWkoA=="
folderKind = "FOLDER"
fileKind = "FILE"
assetKind = "ASSET"
statusAvailable = "AVAILABLE"
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
minSleep = 20 * time.Millisecond
)
// Globals
var (
// Description of how to auth for this app
acdConfig = &oauth2.Config{
Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
Endpoint: oauth2.Endpoint{
AuthURL: "https://www.amazon.com/ap/oa",
TokenURL: "https://api.amazon.com/auth/o2/token",
},
ClientID: rcloneClientID,
ClientSecret: fs.Reveal(rcloneClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "amazon cloud drive",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config(name, acdConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: oauthutil.ConfigClientID,
Help: "Amazon Application Client Id - leave blank normally.",
}, {
Name: oauthutil.ConfigClientSecret,
Help: "Amazon Application Client Secret - leave blank normally.",
}},
})
}
// Fs represents a remote acd server
type Fs struct {
name string // name of this remote
c *acd.Client // the connection to the acd server
root string // the path we are working on
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // pacer for API calls
}
// Object describes an acd object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
info *acd.Node // Info from the acd object if known
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Amazon cloud drive root '%s'", f.root)
}
// Pattern to match an acd path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses an acd 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
409, // Conflict - happens in the unit tests a lot
503, // Service Unavailable
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
root = parsePath(root)
oAuthClient, err := oauthutil.NewClient(name, acdConfig)
if err != nil {
log.Fatalf("Failed to configure amazon cloud drive: %v", err)
}
c := acd.NewClient(oAuthClient)
c.UserAgent = fs.UserAgent
f := &Fs{
name: name,
root: root,
c: c,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
}
// Update endpoints
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
_, resp, err = f.c.Account.GetEndpoints()
return shouldRetry(resp, err)
})
if err != nil {
return nil, fmt.Errorf("Failed to get endpoints: %v", err)
}
// Get rootID
var rootInfo *acd.Folder
err = f.pacer.Call(func() (bool, error) {
rootInfo, resp, err = f.c.Nodes.GetRoot()
return shouldRetry(resp, err)
})
if err != nil || rootInfo.Id == nil {
return nil, fmt.Errorf("Failed to get root: %v", err)
}
f.dirCache = dircache.New(root, *rootInfo.Id, f)
// Find the current root
err = f.dirCache.FindRoot(false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
newF := *f
newF.dirCache = dircache.New(newRoot, *rootInfo.Id, &newF)
newF.root = newRoot
// Make new Fs which is the parent
err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
obj := newF.newFsObjectWithInfo(remote, nil)
if obj == nil {
// File doesn't exist so return old f
return f, nil
}
// return a Fs Limited to this object
return fs.NewLimited(&newF, obj), nil
}
return f, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *Fs) newFsObjectWithInfo(remote string, info *acd.Node) fs.Object {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
// Set info but not meta
o.info = info
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
// logged already FsDebug("Failed to read info: %s", err)
return nil
}
}
return o
}
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *Fs) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
//fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response
var subFolder *acd.Folder
err = f.pacer.Call(func() (bool, error) {
subFolder, resp, err = folder.GetFolder(leaf)
return shouldRetry(resp, err)
})
if err != nil {
if err == acd.ErrorNodeNotFound {
//fs.Debug(f, "...Not found")
return "", false, nil
}
//fs.Debug(f, "...Error %v", err)
return "", false, err
}
if subFolder.Status != nil && *subFolder.Status != statusAvailable {
fs.Debug(f, "Ignoring folder %q in state %q", *subFolder.Status)
time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
return "", false, nil
}
//fs.Debug(f, "...Found(%q, %v)", *subFolder.Id, leaf)
return *subFolder.Id, true, nil
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response
var info *acd.Folder
err = f.pacer.Call(func() (bool, error) {
info, resp, err = folder.CreateFolder(leaf)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
return "", err
}
//fmt.Printf("...Id %q\n", *info.Id)
return *info.Id, nil
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*acd.Node) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
query := "parents:" + dirID
if directoriesOnly {
query += " AND kind:" + folderKind
} else if filesOnly {
query += " AND kind:" + fileKind
} else {
// FIXME none of these work
//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
}
opts := acd.NodeListOptions{
Filters: query,
}
var nodes []*acd.Node
//var resp *http.Response
OUTER:
for {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
nodes, resp, err = f.c.Nodes.GetNodes(&opts)
return shouldRetry(resp, err)
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list files: %v", err)
break
}
if nodes == nil {
break
}
for _, node := range nodes {
if node.Name != nil && node.Id != nil && node.Kind != nil && node.Status != nil {
// Ignore nodes if not AVAILABLE
if *node.Status != statusAvailable {
continue
}
if fn(node) {
found = true
break OUTER
}
}
}
}
return
}
// Path should be directory path either "" or "path/"
//
// List the directory using a recursive list from the root
//
// This fetches the minimum amount of stuff but does more API calls
// which makes it slow
func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
var subError error
// Make the API request
var wg sync.WaitGroup
_, err := f.listAll(dirID, "", false, false, func(node *acd.Node) bool {
// Recurse on directories
switch *node.Kind {
case folderKind:
wg.Add(1)
folder := path + *node.Name + "/"
fs.Debug(f, "Reading %s", folder)
go func() {
defer wg.Done()
err := f.listDirRecursive(*node.Id, folder, out)
if err != nil {
subError = err
fs.ErrorLog(f, "Error reading %s:%s", folder, err)
}
}()
return false
case fileKind:
if fs := f.newFsObjectWithInfo(path+*node.Name, node); fs != nil {
out <- fs
}
default:
// ignore ASSET etc
}
return false
})
wg.Wait()
fs.Debug(f, "Finished reading %s", path)
if err != nil {
return err
}
if subError != nil {
return subError
}
return nil
}
// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
defer close(out)
err := f.dirCache.FindRoot(false)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
err = f.listDirRecursive(f.dirCache.RootID(), "", out)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "List failed: %s", err)
}
}
}()
return out
}
// ListDir lists the directories
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
err := f.dirCache.FindRoot(false)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
_, err := f.listAll(f.dirCache.RootID(), "", true, false, func(item *acd.Node) bool {
dir := &fs.Dir{
Name: *item.Name,
Bytes: -1,
Count: -1,
}
dir.When, _ = time.Parse(timeFormat, *item.ModifiedDate)
out <- dir
return false
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "ListDir failed: %s", err)
}
}
}()
return out
}
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
leaf, directoryID, err := f.dirCache.FindPath(remote, true)
if err != nil {
return nil, err
}
folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
var info *acd.File
var resp *http.Response
err = f.pacer.CallNoRetry(func() (bool, error) {
if size != 0 {
// size is known so use the sized upload variant
info, resp, err = folder.PutSized(in, size, leaf)
} else {
info, resp, err = folder.Put(in, leaf)
}
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
o.info = info.Node
return o, nil
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir() error {
return f.dirCache.FindRoot(true)
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(check bool) error {
if f.root == "" {
return fmt.Errorf("Can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(false)
if err != nil {
return err
}
rootID := dc.RootID()
if check {
// check directory is empty
empty := true
_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
switch *node.Kind {
case folderKind:
empty = false
return true
case fileKind:
empty = false
return true
default:
fs.Debug("Found ASSET %s", *node.Id)
}
return false
})
if err != nil {
return err
}
if !empty {
return fmt.Errorf("Directory not empty")
}
}
node := acd.NodeFromId(rootID, f.c.Nodes)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = node.Trash()
return shouldRetry(resp, err)
})
if err != nil {
return err
}
f.dirCache.ResetRoot()
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir() error {
return f.purgeCheck(true)
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// srcObj, ok := src.(*Object)
// if !ok {
// fs.Debug(src, "Can't copy - not same remote type")
// return nil, fs.ErrorCantCopy
// }
// srcFs := srcObj.fs
// _, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
// if err != nil {
// return nil, err
// }
// return f.NewFsObject(remote), nil
//}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
return f.purgeCheck(false)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
if o.info.ContentProperties.Md5 != nil {
return *o.info.ContentProperties.Md5, nil
}
return "", nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return int64(*o.info.ContentProperties.Size)
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if o.info != nil {
return nil
}
leaf, directoryID, err := o.fs.dirCache.FindPath(o.remote, false)
if err != nil {
return err
}
folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
var resp *http.Response
var info *acd.File
err = o.fs.pacer.Call(func() (bool, error) {
info, resp, err = folder.GetFile(leaf)
return shouldRetry(resp, err)
})
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
}
o.info = info.Node
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %s", err)
return time.Now()
}
return modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
// FIXME not implemented
return
}
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open() (in io.ReadCloser, err error) {
file := acd.File{Node: o.info}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
in, resp, err = file.Open()
return shouldRetry(resp, err)
})
return in, err
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
file := acd.File{Node: o.info}
var info *acd.File
var resp *http.Response
var err error
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
if size != 0 {
info, resp, err = file.OverwriteSized(in, size)
} else {
info, resp, err = file.Overwrite(in)
}
return shouldRetry(resp, err)
})
if err != nil {
return err
}
o.info = info.Node
return nil
}
// Remove an object
func (o *Object) Remove() error {
var resp *http.Response
var err error
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.info.Trash()
return shouldRetry(resp, err)
})
return err
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
// _ fs.Copier = (*Fs)(nil)
// _ fs.Mover = (*Fs)(nil)
// _ fs.DirMover = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)


@@ -0,0 +1,56 @@
// Test AmazonCloudDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package amazonclouddrive_test
import (
"testing"
"github.com/ncw/rclone/amazonclouddrive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func init() {
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
fstests.RemoteName = "TestAmazonCloudDrive:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

appveyor.yml Normal file

@@ -0,0 +1,21 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
environment:
GOPATH: c:\gopath
install:
- go get golang.org/x/tools/cmd/vet
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go get -d ./...
build_script:
- go vet ./...
- go test -v -cpu=2 ./...
- go test -cpu=2 -short -race ./...


@@ -3,7 +3,7 @@
set -e
# This uses gox from https://github.com/mitchellh/gox
# Make sure you've run gox -build-toolchain
# Make sure you've run gox -build-toolchain - not required for go >= 1.5
if [ "$1" == "" ]; then
echo "Syntax: $0 Version"
@@ -13,7 +13,9 @@ VERSION="$1"
rm -rf build
gox -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}"
gox -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}" -os "darwin linux freebsd openbsd windows freebsd netbsd plan9 solaris"
# Not implemented yet: nacl dragonfly android
# gox -osarch-list for definitive list
mv build/rclone-${VERSION}-darwin-amd64 build/rclone-${VERSION}-osx-amd64
mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
@@ -21,10 +23,12 @@ mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
cd build
for d in `ls`; do
cp -a ../README.txt $d/
cp -a ../README.html $d/
cp -a ../MANUAL.txt $d/README.txt
cp -a ../MANUAL.html $d/README.html
cp -a ../rclone.1 $d/
zip -r9 $d.zip $d
d_current=${d/-${VERSION}/-current}
ln $d.zip $d_current.zip
rm -rf $d
done

dircache/dircache.go Normal file

@@ -0,0 +1,272 @@
// Package dircache provides a simple cache for caching directory to path lookups
package dircache
// _ methods don't take the lock themselves - the caller must hold it
import (
"fmt"
"log"
"strings"
"sync"
)
// DirCache caches paths to directory IDs and vice versa
type DirCache struct {
cacheMu sync.RWMutex
cache map[string]string
invCache map[string]string
mu sync.Mutex
fs DirCacher // Interface to find and make stuff
trueRootID string // ID of the absolute root
root string // the path we are working on
rootID string // ID of the root directory
rootParentID string // ID of the root's parent directory
foundRoot bool // Whether we have found the root or not
}
// DirCacher describes an interface for doing the low level directory work
type DirCacher interface {
FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error)
CreateDir(pathID, leaf string) (newID string, err error)
}
// New makes a DirCache
//
// The cache is safe for concurrent use
func New(root string, trueRootID string, fs DirCacher) *DirCache {
d := &DirCache{
trueRootID: trueRootID,
root: root,
fs: fs,
}
d.Flush()
d.ResetRoot()
return d
}
// Get an ID given a path
func (dc *DirCache) Get(path string) (id string, ok bool) {
dc.cacheMu.RLock()
id, ok = dc.cache[path]
dc.cacheMu.RUnlock()
return
}
// GetInv gets a path given an ID
func (dc *DirCache) GetInv(id string) (path string, ok bool) {
dc.cacheMu.RLock()
path, ok = dc.invCache[id]
dc.cacheMu.RUnlock()
return
}
// Put a path, id into the map
func (dc *DirCache) Put(path, id string) {
dc.cacheMu.Lock()
dc.cache[path] = id
dc.invCache[id] = path
dc.cacheMu.Unlock()
}
// Flush the map of all data
func (dc *DirCache) Flush() {
dc.cacheMu.Lock()
dc.cache = make(map[string]string)
dc.invCache = make(map[string]string)
dc.cacheMu.Unlock()
}
// SplitPath splits a path into directory, leaf
//
// Path shouldn't start or end with a /
//
// If there are no slashes then directory will be "" and leaf = path
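//
// For example (illustrative): SplitPath("a/b/c.txt") returns
// ("a/b", "c.txt") and SplitPath("c.txt") returns ("", "c.txt")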
func SplitPath(path string) (directory, leaf string) {
lastSlash := strings.LastIndex(path, "/")
if lastSlash >= 0 {
directory = path[:lastSlash]
leaf = path[lastSlash+1:]
} else {
directory = ""
leaf = path
}
return
}
// FindDir finds the directory passed in returning the directory ID
// starting from pathID
//
// Path shouldn't start or end with a /
//
// If create is set it will make the directory if not found
//
// Algorithm:
// Look in the cache for the path, if found return the pathID
// If not found strip the last path off the path and recurse
// Now have a parent directory id, so look in the parent for self and return it
func (dc *DirCache) FindDir(path string, create bool) (pathID string, err error) {
dc.mu.Lock()
defer dc.mu.Unlock()
return dc._findDir(path, create)
}
// Look for the root and in the cache - safe to call without the mu
func (dc *DirCache) _findDirInCache(path string) string {
// fmt.Println("Finding",path,"create",create,"cache",cache)
// If it is the root, then return it
if path == "" {
// fmt.Println("Root")
return dc.rootID
}
// If it is in the cache then return it
pathID, ok := dc.Get(path)
if ok {
// fmt.Println("Cache hit on", path)
return pathID
}
return ""
}
// Unlocked findDir - must have mu
func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error) {
pathID = dc._findDirInCache(path)
if pathID != "" {
return pathID, nil
}
// Split the path into directory, leaf
directory, leaf := SplitPath(path)
// Recurse and find pathID for parent directory
parentPathID, err := dc._findDir(directory, create)
if err != nil {
return "", err
}
// Find the leaf in parentPathID
pathID, found, err := dc.fs.FindLeaf(parentPathID, leaf)
if err != nil {
return "", err
}
// If not found create the directory if required or return an error
if !found {
if create {
pathID, err = dc.fs.CreateDir(parentPathID, leaf)
if err != nil {
return "", fmt.Errorf("Failed to make directory: %v", err)
}
} else {
return "", fmt.Errorf("Couldn't find directory: %q", path)
}
}
// Store the leaf directory in the cache
dc.Put(path, pathID)
// fmt.Println("Dir", path, "is", pathID)
return pathID, nil
}
// FindPath finds the leaf and directoryID from a path
//
// If create is set parent directories will be created if they don't exist
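//
// For example (illustrative): FindPath("a/b/c.txt", true) returns leaf
// "c.txt" and the ID of directory "a/b", creating "a" and "a/b" first
// if they don't already exist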
func (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string, err error) {
dc.mu.Lock()
defer dc.mu.Unlock()
directory, leaf := SplitPath(path)
directoryID, err = dc._findDir(directory, create)
if err != nil {
if create {
err = fmt.Errorf("Couldn't find or make directory %q: %s", directory, err)
} else {
err = fmt.Errorf("Couldn't find directory %q: %s", directory, err)
}
}
return
}
// FindRoot finds the root directory if not already found
//
// Once found, it resets the cache so lookups are relative to the root
//
// If create is set it will make the directory if not found
func (dc *DirCache) FindRoot(create bool) error {
dc.mu.Lock()
defer dc.mu.Unlock()
if dc.foundRoot {
return nil
}
rootID, err := dc._findDir(dc.root, create)
if err != nil {
return err
}
dc.foundRoot = true
dc.rootID = rootID
// Find the parent of the root while we still have the root
// directory tree cached
rootParentPath, _ := SplitPath(dc.root)
dc.rootParentID, _ = dc.Get(rootParentPath)
// Reset the tree based on dc.root
dc.Flush()
// Put the root directory in
dc.Put("", dc.rootID)
return nil
}
// FoundRoot returns whether the root directory has been found yet
//
// Call this from FindLeaf or CreateDir only
func (dc *DirCache) FoundRoot() bool {
return dc.foundRoot
}
// RootID returns the ID of the root directory
//
// This should be called after FindRoot
func (dc *DirCache) RootID() string {
dc.mu.Lock()
defer dc.mu.Unlock()
if !dc.foundRoot {
log.Fatalf("Internal Error: RootID() called before FindRoot")
}
return dc.rootID
}
// RootParentID returns the ID of the parent of the root directory
//
// This should be called after FindRoot
func (dc *DirCache) RootParentID() (string, error) {
dc.mu.Lock()
defer dc.mu.Unlock()
if !dc.foundRoot {
return "", fmt.Errorf("Internal Error: RootID() called before FindRoot")
}
if dc.rootParentID == "" {
return "", fmt.Errorf("Internal Error: Didn't find rootParentID")
}
if dc.rootID == dc.trueRootID {
return "", fmt.Errorf("Is root directory")
}
return dc.rootParentID, nil
}
// ResetRoot resets the root directory to the absolute root and clears
// the DirCache
func (dc *DirCache) ResetRoot() {
dc.mu.Lock()
defer dc.mu.Unlock()
dc.foundRoot = false
dc.Flush()
// Put the true root in
dc.rootID = dc.trueRootID
// Put the root directory in
dc.Put("", dc.rootID)
}
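To show how a backend plugs into this cache, here is a minimal usage
sketch (not from the repository - the in-memory DirCacher and the IDs
are invented for illustration):

```
package main

import (
	"fmt"

	"github.com/ncw/rclone/dircache"
)

// memFs is a toy DirCacher which never finds anything and mints new
// directory IDs as parentID + "/" + leaf.
type memFs struct{}

func (memFs) FindLeaf(pathID, leaf string) (string, bool, error) {
	return "", false, nil // pretend the directory doesn't exist yet
}

func (memFs) CreateDir(pathID, leaf string) (string, error) {
	return pathID + "/" + leaf, nil
}

func main() {
	dc := dircache.New("backup", "root", memFs{})
	if err := dc.FindRoot(true); err != nil { // creates "backup" under the true root
		fmt.Println("FindRoot:", err)
		return
	}
	id, err := dc.FindDir("photos/2015", true)
	fmt.Println(id, err) // prints "root/backup/photos/2015 <nil>"
}
```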


@@ -5,7 +5,7 @@
"menu": "menu"
},
"baseurl": "http://rclone.org",
"title": "rclone - rsync for object storage",
"description": "rclone - rsync for object storage: google drive, s3, swift, cloudfiles, memstore...",
"title": "rclone - rsync for cloud storage",
"description": "rclone - rsync for cloud storage: google drive, s3, swift, cloudfiles, dropbox, memstore...",
"canonifyurls": true
}


@@ -1,8 +1,8 @@
---
title: "Rclone"
description: "rclone syncs files to and from Google Drive, S3, Swift and Cloudfiles."
description: "rclone syncs files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox, Google Cloud Storage and Amazon Cloud Drive."
type: page
date: "2014-04-26"
date: "2015-09-06"
groups: ["about"]
---
@@ -16,6 +16,10 @@ Rclone is a command line program to sync files and directories to and from
* Google Drive
* Amazon S3
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* Amazon Cloud Drive
* Microsoft One Drive
* The local filesystem
Features


@@ -0,0 +1,104 @@
---
title: "Amazon Cloud Drive"
description: "Rclone docs for Amazon Cloud Drive"
date: "2015-09-06"
---
<i class="fa fa-amazon"></i> Amazon Cloud Drive
-----------------------------------------
Paths are specified as `remote:path`
Paths may be as deep as required, eg `remote:directory/subdirectory`.
The initial setup for Amazon cloud drive involves getting a token from
Amazon which you need to do in your browser. `rclone config` walks
you through it.
Here is an example of how to make a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) drive
3) dropbox
4) google cloud storage
5) local
6) s3
7) swift
type> 1
Amazon Application Client Id - leave blank normally.
client_id>
Amazon Application Client Secret - leave blank normally.
client_secret>
Remote config
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
client_id =
client_secret =
token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxx","token_type":"bearer","refresh_token":"xxxxxxxxxxxxxxxxxx","expiry":"2015-09-06T16:07:39.658438471+01:00"}
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
Note that rclone runs a webserver on your local machine to collect the
token as returned from Amazon. This only runs from the moment it
opens your browser to the moment you get back the verification
code. This is on `http://127.0.0.1:53682/` and it may require
you to unblock it temporarily if you are running a host firewall.
Once configured you can then use `rclone` like this,
List directories in top level of your Amazon cloud drive
rclone lsd remote:
List all the files in your Amazon cloud drive
rclone ls remote:
To copy a local directory to an Amazon cloud drive directory called backup
rclone copy /home/source remote:backup
### Modified time and MD5SUMs ###
Amazon cloud drive doesn't allow modification times to be changed via
the API so these won't be accurate or used for syncing.
It does store MD5SUMs so for a more accurate sync, you can use the
`--checksum` flag.
### Deleting files ###
Any files you delete with rclone will end up in the trash. Amazon
don't provide an API to permanently delete files, nor to empty the
trash, so you will have to do that with one of Amazon's apps or via
the Amazon cloud drive website.
### Limitations ###
Note that Amazon cloud drive is case insensitive so you can't have a
file called "Hello.doc" and one called "hello.doc".
Amazon cloud drive has rate limiting so you may notice errors in the
sync (429 errors). rclone will automatically retry the sync up to 3
times by default (see `--retries` flag) which should hopefully work
around this problem.

docs/content/authors.md Normal file

@@ -0,0 +1,20 @@
---
title: "Authors"
description: "Rclone Authors and Contributors"
date: "2015-09-28"
---
Authors
-------
* Nick Craig-Wood <nick@craig-wood.com>
Contributors
------------
* Alex Couper <amcouper@gmail.com>
* Leonid Shalupov <leonid@shalupov.com>
* Shimon Doodkin <helpmepro1@gmail.com>
* Colin Nicholson <colin@colinn.com>
* Klaus Post <klauspost@gmail.com>
* Sergey Tolmachev <tolsi.ru@gmail.com>

docs/content/bugs.md Normal file

@@ -0,0 +1,31 @@
---
title: "Bugs"
description: "Rclone Bugs and Limitations"
date: "2014-06-16"
---
Bugs and Limitations
--------------------
### Empty directories are left behind / not created ###
With remotes that have a concept of directory, eg Local and Drive,
empty directories may be left behind, or not created when one was
expected.
This is because rclone doesn't have a concept of a directory - it only
works on objects. Most of the object storage systems can't actually
store a directory so there is nowhere for rclone to store anything
about directories.
You can work round this to some extent with the `purge` command which
will delete everything under the path, **including** empty directories.
This may be fixed at some point in
[Issue #100](https://github.com/ncw/rclone/issues/100)
### Directory timestamps aren't preserved ###
For the same reason as the above, rclone doesn't have a concept of a
directory - it only works on objects, therefore it can't preserve the
timestamps of directories.

docs/content/changelog.md Normal file

@@ -0,0 +1,207 @@
---
title: "Documentation"
description: "Rclone Changelog"
date: "2015-11-07"
---
Changelog
---------
* v1.24 - 2015-11-07
* New features
* Add support for Microsoft One Drive
* Add `--no-check-certificate` option to disable server certificate verification
* Add async readahead buffer for faster transfer of big files
* Fixes
* Allow spaces in remotes and check remote names for validity at creation time
* Allow '&' and disallow ':' in Windows filenames.
* Swift
* Ignore directory marker objects where appropriate - allows working with Hubic
* Don't delete the container if fs wasn't at root
* S3
* Don't delete the bucket if fs wasn't at root
* Google Cloud Storage
* Don't delete the bucket if fs wasn't at root
* v1.23 - 2015-10-03
* New features
* Implement `rclone size` for measuring remotes
* Fixes
* Fix headless config for drive and gcs
* Tell the user they should try again if the webserver method failed
* Improve output of `--dump-headers`
* S3
* Allow anonymous access to public buckets
* Swift
* Stop chunked operations logging "Failed to read info: Object Not Found"
* Use Content-Length on uploads for extra reliability
* v1.22 - 2015-09-28
* Implement rsync like include and exclude flags
* swift
* Support files > 5GB - thanks Sergey Tolmachev
* v1.21 - 2015-09-22
* New features
* Display individual transfer progress
* Make lsl output times in localtime
* Fixes
* Fix allowing user to override credentials again in Drive, GCS and ACD
* Amazon Cloud Drive
* Implement compliant pacing scheme
* Google Drive
* Make directory reads concurrent for increased speed.
* v1.20 - 2015-09-15
* New features
* Amazon Cloud Drive support
* Oauth support redone - fix many bugs and improve usability
* Use "golang.org/x/oauth2" as oauth libary of choice
* Improve oauth usability for smoother initial signup
* drive, googlecloudstorage: optionally use auto config for the oauth token
* Implement --dump-headers and --dump-bodies debug flags
* Show multiple matched commands if abbreviation too short
* Implement server side move where possible
* local
* Always use UNC paths internally on Windows - fixes a lot of bugs
* dropbox
* force use of our custom transport which makes timeouts work
* Thanks to Klaus Post for lots of help with this release
* v1.19 - 2015-08-28
* New features
* Server side copies for s3/swift/drive/dropbox/gcs
* Move command - uses server side copies if it can
* Implement --retries flag - tries 3 times by default
* Build for plan9/amd64 and solaris/amd64 too
* Fixes
* Make a current version download with a fixed URL for scripting
* Ignore rmdir in limited fs rather than throwing error
* dropbox
* Increase chunk size to improve upload speeds massively
* Issue an error message when trying to upload bad file name
* v1.18 - 2015-08-17
* drive
* Add `--drive-use-trash` flag so rclone trashes instead of deletes
* Add "Forbidden to download" message for files with no downloadURL
* dropbox
* Remove datastore
* This was deprecated and it caused a lot of problems
* Modification times and MD5SUMs no longer stored
* Fix uploading files > 2GB
* s3
* use official AWS SDK from github.com/aws/aws-sdk-go
* **NB** will most likely require you to delete and recreate remote
* enable multipart upload which enables files > 5GB
* tested with Ceph / RadosGW / S3 emulation
* many thanks to Sam Liston and Brian Haymore at the [Utah
Center for High Performance Computing](https://www.chpc.utah.edu/) for a Ceph test account
* misc
* Show errors when reading the config file
* Do not print stats in quiet mode - thanks Leonid Shalupov
* Add FAQ
* Fix created directories not obeying umask
* Linux installation instructions - thanks Shimon Doodkin
* v1.17 - 2015-06-14
* dropbox: fix case insensitivity issues - thanks Leonid Shalupov
* v1.16 - 2015-06-09
* Fix uploading big files which was causing timeouts or panics
* Don't check md5sum after download with --size-only
* v1.15 - 2015-06-06
* Add --checksum flag to only discard transfers by MD5SUM - thanks Alex Couper
* Implement --size-only flag to sync on size not checksum & modtime
* Expand docs and remove duplicated information
* Document rclone's limitations with directories
* dropbox: update docs about case insensitivity
* v1.14 - 2015-05-21
* local: fix encoding of non utf-8 file names - fixes a duplicate file problem
* drive: docs about rate limiting
* google cloud storage: Fix compile after API change in "google.golang.org/api/storage/v1"
* v1.13 - 2015-05-10
* Revise documentation (especially sync)
* Implement --timeout and --conntimeout
* s3: ignore etags from multipart uploads which aren't md5sums
* v1.12 - 2015-03-15
* drive: Use chunked upload for files above a certain size
* drive: add --drive-chunk-size and --drive-upload-cutoff parameters
* drive: switch to insert from update when a failed copy deletes the upload
* core: Log duplicate files if they are detected
* v1.11 - 2015-03-04
* swift: add region parameter
* drive: fix crash on failed to update remote mtime
* In remote paths, change native directory separators to /
* Add synchronization to ls/lsl/lsd output to stop corruptions
* Ensure all stats/log messages to go stderr
* Add --log-file flag to log everything (including panics) to file
* Make it possible to disable stats printing with --stats=0
* Implement --bwlimit to limit data transfer bandwidth
* v1.10 - 2015-02-12
* s3: list an unlimited number of items
* Fix getting stuck in the configurator
* v1.09 - 2015-02-07
* windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
* local: Fix directory separators on Windows
* drive: fix rate limit exceeded errors
* v1.08 - 2015-02-04
* drive: fix subdirectory listing to not list entire drive
* drive: Fix SetModTime
* dropbox: adapt code to recent library changes
* v1.07 - 2014-12-23
* google cloud storage: fix memory leak
* v1.06 - 2014-12-12
* Fix "Couldn't find home directory" on OSX
* swift: Add tenant parameter
* Use new location of Google API packages
* v1.05 - 2014-08-09
* Improved tests and consequently lots of minor fixes
* core: Fix race detected by go race detector
* core: Fixes after running errcheck
* drive: reset root directory on Rmdir and Purge
* fs: Document that Purger returns error on empty directory, test and fix
* google cloud storage: fix ListDir on subdirectory
* google cloud storage: re-read metadata in SetModTime
* s3: make reading metadata more reliable to work around eventual consistency problems
* s3: strip trailing / from ListDir()
* swift: return directories without / in ListDir
* v1.04 - 2014-07-21
* google cloud storage: Fix crash on Update
* v1.03 - 2014-07-20
* swift, s3, dropbox: fix updated files being marked as corrupted
* Make compile with go 1.1 again
* v1.02 - 2014-07-19
* Implement Dropbox remote
* Implement Google Cloud Storage remote
* Verify Md5sums and Sizes after copies
* Remove times from "ls" command - lists sizes only
* Add add "lsl" - lists times and sizes
* Add "md5sum" command
* v1.01 - 2014-07-04
* drive: fix transfer of big files using up lots of memory
* v1.00 - 2014-07-03
* drive: fix whole second dates
* v0.99 - 2014-06-26
* Fix --dry-run not working
* Make compatible with go 1.1
* v0.98 - 2014-05-30
* s3: Treat missing Content-Length as 0 for some ceph installations
* rclonetest: add file with a space in
* v0.97 - 2014-05-05
* Implement copying of single files
* s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
* drive: Fix multiple files of same name being created
* drive: Use o.Update and fs.Put to optimise transfers
* Add version number, -V and --version
* v0.95 - 2014-03-28
* rclone.org: website, docs and graphics
* drive: fix path parsing
* v0.94 - 2014-03-27
* Change remote format one last time
* GNU style flags
* v0.93 - 2014-03-16
* drive: store token in config file
* cross compile other versions
* set strict permissions on config file
* v0.92 - 2014-03-15
* Config fixes and --config option
* v0.91 - 2014-03-15
* Make config file
* v0.90 - 2013-06-27
* Project named rclone
* v0.00 - 2012-11-18
* Project started


@@ -5,8 +5,17 @@ date: "2014-04-26"
---
Contact the rclone project
--------------------------
The project website is at:
* https://github.com/ncw/rclone
There you can file bug reports, ask for help or contribute pull
requests.
See also
* [Github project page for source, reporting bugs and pull requests](http://github.com/ncw/rclone)
* <a href="https://google.com/+RcloneOrg" rel="publisher">Google+ page for general comments</a></li>
Or email [Nick Craig-Wood](mailto:nick@craig-wood.com)


@@ -1,22 +1,9 @@
---
title: "Documentation"
description: "Rclone Documentation"
date: "2014-04-26"
description: "Rclone Usage"
date: "2015-06-06"
---
Install
-------
Rclone is a Go program and comes as a single binary file.
[Download](/downloads/) the relevant binary.
Or alternatively if you have Go installed use
go get github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`.
Configure
---------
@@ -30,11 +17,13 @@ option:
rclone config
See below for detailed instructions for
See the following for detailed instructions for
* [Google drive](/drive/)
* [Amazon S3](/s3/)
* [Swift / Rackspace Cloudfiles / Memset Memstore](/swift/)
* [Dropbox](/dropbox/)
* [Google Cloud Storage](/googlecloudstorage/)
* [Local filesystem](/local/)
Usage
@@ -55,96 +44,283 @@ You can define as many storage paths as you like in the config file.
Subcommands
-----------
rclone copy source:path dest:path
### rclone copy source:path dest:path ###
Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.
rclone sync source:path dest:path
### rclone sync source:path dest:path ###
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the -dry-run flag.
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the `--dry-run` flag.
rclone ls [remote:path]
### rclone ls remote:path ###
List all the objects in the path.
List all the objects in the path with size and path.
rclone lsd [remote:path]
### rclone lsd remote:path ###
List all directoryes/objects/buckets in the path.
List all directories/containers/buckets in the path.
rclone mkdir remote:path
### rclone lsl remote:path ###
List all the objects in the path with modification time,
size and path.
### rclone md5sum remote:path ###
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.
### rclone size remote:path ###
Prints the total size of objects in remote:path and the number of
objects.
### rclone mkdir remote:path ###
Make the path if it doesn't already exist
rclone rmdir remote:path
### rclone rmdir remote:path ###
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.
rclone purge remote:path
### rclone purge remote:path ###
Remove the path and all of its contents.
rclone check source:path dest:path
### rclone check source:path dest:path ###
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
General options:
### rclone config ###
```
--checkers=8: Number of checkers to run in parallel.
--transfers=4: Number of file transfers to run in parallel.
--config="~/.rclone.conf": Config file.
-n, --dry-run=false: Do a trial run with no permanent changes
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats
-v, --verbose=false: Print lots more stuff
```
Enter an interactive configuration session.
Developer options:
### rclone help ###
```
--cpuprofile="": Write cpu profile to file
```
Prints help on rclone commands and options.
License
Server Side Copy
----------------
Drive, S3, Dropbox, Swift and Google Cloud Storage support server side
copy.
This means if you want to copy one folder to another then rclone won't
download all the files and re-upload them; it will instruct the server
to copy them in place.
Eg
rclone copy s3:oldbucket s3:newbucket
Will copy the contents of `oldbucket` to `newbucket` without
downloading and re-uploading.
Remotes which don't support server side copy (eg local) **will**
download and re-upload in this case.
Server side copies are used with `sync` and `copy` and will be
identified in the log when using the `-v` flag.
Server side copies will only be attempted if the remote names are the
same.
This can be used when scripting to make aged backups efficiently, eg
rclone sync remote:current-backup remote:previous-backup
rclone sync /path/to/files remote:current-backup
Options
-------
This is free software under the terms of the MIT license (check the
COPYING file included in this package).
Rclone has a number of options to control its behaviour.
Bugs
----
Options which use TIME use the go time parser. A duration string is a
possibly signed sequence of decimal numbers, each with optional
fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid
time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
* Doesn't sync individual files yet, only directories.
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work
Options which use SIZE use kByte by default. However a suffix of `k`
for kBytes, `M` for MBytes and `G` for GBytes may be used. These are
the binary units, eg 2**10, 2**20, 2**30 respectively.
Contact and support
-------------------
### --bwlimit=SIZE ###
The project website is at:
Bandwidth limit in kBytes/s, or use suffix k|M|G. The default is `0`
which means to not limit bandwidth.
* https://github.com/ncw/rclone
For example to limit bandwidth usage to 10 MBytes/s use `--bwlimit 10M`
There you can file bug reports, ask for help or contribute patches.
This only limits the bandwidth of the data transfer, it doesn't limit
the bandwidth of the directory listings etc.
Authors
-------
### --checkers=N ###
* Nick Craig-Wood <nick@craig-wood.com>
The number of checkers to run in parallel. Checkers do the equality
checking of files during a sync. For some storage systems (eg s3,
swift, dropbox) this can take a significant amount of time so they are
run in parallel.
Contributors
------------
The default is to run 8 checkers in parallel.
* Your name goes here!
### -c, --checksum ###
Normally rclone will look at modification time and size of files to
see if they are equal. If you set this flag then rclone will check
MD5SUM and size to determine if files are equal.
This is very useful when transferring between remotes which store the
MD5SUM on the object which include swift, s3, drive, and google cloud
storage.
Eg `rclone --checksum sync s3:/bucket swift:/bucket` would run much
quicker than without the `--checksum` flag.
When using this flag, rclone won't update mtimes of remote files if
they are incorrect as it would normally.
### --config=CONFIG_FILE ###
Specify the location of the rclone config file. Normally this is in
your home directory as a file called `.rclone.conf`. If you run
`rclone -h` and look at the help for the `--config` option you will
see where the default location is for you. Use this flag to override
the config location, eg `rclone --config=".myconfig" .config`.
### --contimeout=TIME ###
Set the connection timeout. This should be in go time format which
looks like `5s` for 5 seconds, `10m` for 10 minutes, or `3h30m`.
The connection timeout is the amount of time rclone will wait for a
connection to go through to a remote object storage system. It is
`1m` by default.
### -n, --dry-run ###
Do a trial run with no permanent changes. Use this in combination
with the `-v` flag to see what rclone would do without actually doing
it. Useful when setting up the `sync` command.
### --log-file=FILE ###
Log all of rclone's output to FILE. This is not active by default.
This can be useful for tracking down problems with syncs in
combination with the `-v` flag.
### --modify-window=TIME ###
When checking whether a file has been modified, this is the maximum
allowed time difference that a file can have and still be considered
equivalent.
The default is `1ns` unless this is overridden by a remote. For
example OS X only stores modification times to the nearest second so
if you are reading and writing to an OS X filing system this will be
`1s` by default.
This command line flag allows you to override that computed default.
### -q, --quiet ###
Normally rclone outputs stats and a completion message. If you set
this flag it will make as little output as possible.
### --size-only ###
Normally rclone will look at modification time and size of files to
see if they are equal. If you set this flag then rclone will check
only the size.
This can be useful transferring files from dropbox which have been
modified by the desktop sync client which doesn't set checksums or
modification times in the same way as rclone.
When using this flag, rclone won't update mtimes of remote files if
they are incorrect as it would normally.
### --stats=TIME ###
Rclone will print stats at regular intervals to show its progress.
This sets the interval.
The default is `1m`. Use 0 to disable.
### --timeout=TIME ###
This sets the IO idle timeout. If a transfer has started but then
becomes idle for this long it is considered broken and disconnected.
The default is `5m`. Set to 0 to disable.
### --transfers=N ###
The number of file transfers to run in parallel. It can sometimes be
useful to set this to a smaller number if the remote is giving a lot
of timeouts or bigger if you have lots of bandwidth and a fast remote.
The default is to run 4 file transfers in parallel.
### -v, --verbose ###
If you set this flag, rclone will become very verbose telling you
about every file it considers and transfers.
Very useful for debugging.
### -V, --version ###
Prints the version number
Developer options
-----------------
These options are useful when developing or debugging rclone. There
are also some more remote specific options which aren't documented
here which are used for testing. These start with remote name eg
`--drive-test-option`.
### --cpuprofile=FILE ###
Write cpu profile to file. This can be analysed with `go tool pprof`.
### --no-check-certificate=true/false ###
`--no-check-certificate` controls whether a client verifies the
server's certificate chain and host name.
If `--no-check-certificate` is true, TLS accepts any certificate
presented by the server and any host name in that certificate.
In this mode, TLS is susceptible to man-in-the-middle attacks.
This option defaults to `false`.
**This should be used only for testing.**
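Under the hood this corresponds roughly to setting `InsecureSkipVerify`
on a Go TLS client config (an illustrative sketch, not rclone's actual
code - the URL is made up):

```
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// InsecureSkipVerify disables verification of the server's
	// certificate chain and host name - like --no-check-certificate.
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://self-signed.example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```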
Filtering
---------
For the filtering options
* `--delete-excluded`
* `--filter`
* `--filter-from`
* `--exclude`
* `--exclude-from`
* `--include`
* `--include-from`
* `--files-from`
* `--min-size`
* `--max-size`
* `--dump-filters`
See the [filtering section](/filtering/).

docs/content/donate.md Normal file

@@ -0,0 +1,30 @@
---
title: "Flowers for My Wife"
description: "Flowers for My Wife."
type: page
date: "2015-09-06"
---
Flowers for My Wife
===================
Rclone is a pure open source, for-love-not-money project. However I've
had requests for a donation page and coding it does take me away from
something else I love - my wonderful wife.
So if you would like to send a donation, I will use it to buy flowers
for her which will make her very happy.
<form action="https://www.paypal.com/cgi-bin/webscr" method="post" target="_top">
<input type="hidden" name="cmd" value="_s-xclick">
<input type="hidden" name="hosted_button_id" value="XQMMNUD5ZY49J">
<input type="image" src="https://www.paypalobjects.com/en_US/GB/i/btn/btn_donateCC_LG.gif" border="0" name="submit" alt="PayPal The safer, easier way to pay online.">
<img alt="" border="0" src="https://www.paypalobjects.com/en_GB/i/scr/pixel.gif" width="1" height="1">
</form>
If you would prefer to express your gratitude by promoting the
project, or helping with it, I'd be over the moon with that too!
Thanks
Nick


@@ -2,34 +2,73 @@
title: "Rclone downloads"
description: "Download rclone binaries for your OS."
type: page
date: "2014-05-30"
date: "2015-11-07"
---
Rclone Download v0.98
Rclone Download v1.24
=====================
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.98-windows-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.98-osx-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.98-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v0.98-linux-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.24-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.98-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v0.98-freebsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.24-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.98-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v0.98-netbsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.24-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v0.98-openbsd-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v0.98-plan9-386.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-plan9-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-plan9-amd64.zip)
* Solaris
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-solaris-amd64.zip)
Downloads for scripting
=======================
If you would like to download the current version (maybe from a
script) from a URL which doesn't change then you can use these links.
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-plan9-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-plan9-amd64.zip)
* Solaris
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-solaris-amd64.zip)
Older Downloads
==============
Older downloads can be found [here](http://downloads.rclone.org/)


@@ -31,5 +31,44 @@ Rclone Download VERSION
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-plan9-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-plan9-amd64.zip)
* Solaris
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-solaris-amd64.zip)
Downloads for scripting
=======================
If you would like to download the current version (maybe from a
script) from a URL which doesn't change then you can use these links.
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-current-plan9-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-plan9-amd64.zip)
* Solaris
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-solaris-amd64.zip)
Older Downloads
==============
Older downloads can be found [here](http://downloads.rclone.org/)


@@ -1,13 +1,15 @@
---
title: "Google drive"
description: "Rclone docs for Google drive"
date: "2014-04-26"
date: "2015-09-12"
---
<i class="fa fa-google"></i> Google Drive
-----------------------------------------
Paths are specified as `drive:path`
Drive paths may be as deep as required, eg
`drive:directory/subdirectory`.
Drive paths may be as deep as required, eg `drive:directory/subdirectory`.
The initial setup for drive involves getting a token from Google drive
which you need to do in your browser. `rclone config` walks you
@@ -32,15 +34,21 @@ Choose a number from below
3) local
4) drive
type> 4
Google Application Client Id - leave blank to use rclone's.
Google Application Client Id - leave blank normally.
client_id>
Google Application Client Secret - leave blank to use rclone's.
Google Application Client Secret - leave blank normally.
client_secret>
Remote config
Go to the following link in your browser
https://accounts.google.com/o/oauth2/auth?access_type=&approval_prompt=&client_id=XXXXXXXXXXXX.apps.googleusercontent.com&redirect_uri=urn%3XXXXX%3Awg%3Aoauth%3XX.0%3Aoob&response_type=code&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&state=state
Log in, then paste the token that is returned in the browser here
Enter verification code> X/XXXXXXXXXXXXXXXXXX-XXXXXXXXX.XXXXXXXXX-XXXXX_XXXXXXX_XXXXXXX
Use auto config?
* Say Y if not sure
* Say N if you are working on a remote or headless machine or Y didn't work
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
client_id =
@@ -53,6 +61,13 @@ d) Delete this remote
y/e/d> y
```
Note that rclone runs a webserver on your local machine to collect the
token as returned from Google if you use auto config mode. This only
runs from the moment it opens your browser to the moment you get back
the verification code. This is on `http://127.0.0.1:53682/` and this
it may require you to unblock it temporarily if you are running a host
firewall, or use manual mode.
You can then use it like this,
List directories in top level of your drive
@@ -67,7 +82,31 @@ To copy a local directory to a drive directory called backup
rclone copy /home/source remote:backup
Modified time
-------------
### Modified time ###
Google drive stores modification times accurate to 1 ms.
### Revisions ###
Google drive stores revisions of files. When you upload a change to
an existing file to google drive using rclone it will create a new
revision of that file.
Revisions follow the standard google policy which at time of writing
was
* They are deleted after 30 days or 100 revisions (whatever comes first).
* They do not count towards a user storage quota.
### Deleting files ###
By default rclone will delete files permanently when requested. If
sending them to the trash is required instead then use the
`--drive-use-trash` flag.
### Limitations ###
Drive has quite a lot of rate limiting. This causes rclone to be
limited to transferring about 2 files per second only. Individual
files may be transferred much faster at 100s of MBytes/s but lots of
small files can take a long time.

docs/content/dropbox.md Normal file

@@ -0,0 +1,88 @@
---
title: "Dropbox"
description: "Rclone docs for Dropbox"
date: "2014-07-17"
---
<i class="fa fa-dropbox"></i> Dropbox
---------------------------------
Paths are specified as `remote:path`
Dropbox paths may be as deep as required, eg
`remote:directory/subdirectory`.
The initial setup for dropbox involves getting a token from Dropbox
which you need to do in your browser. `rclone config` walks you
through it.
Here is an example of how to make a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) google cloud storage
5) dropbox
6) drive
type> 5
Dropbox App Key - leave blank normally.
app_key>
Dropbox App Secret - leave blank normally.
app_secret>
Remote config
Please visit:
https://www.dropbox.com/1/oauth2/authorize?client_id=XXXXXXXXXXXXXXX&response_type=code
Enter the code: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXXXXXXXX
--------------------
[remote]
app_key =
app_secret =
token = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
You can then use it like this,
List directories in top level of your dropbox
rclone lsd remote:
List all the files in your dropbox
rclone ls remote:
To copy a local directory to a dropbox directory called backup
rclone copy /home/source remote:backup
### Modified time and MD5SUMs ###
Dropbox doesn't have the capability of storing modification times or
MD5SUMs so syncs will effectively have the `--size-only` flag set.
### Limitations ###
Note that Dropbox is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".
There are some file names such as `thumbs.db` which Dropbox can't
store. There is a full list of them in the ["Ignored Files" section
of this document](https://www.dropbox.com/en/help/145). Rclone will
issue an error message `File name disallowed - not uploading` if it
attempts to upload one of those file names, but the sync won't fail.

docs/content/faq.md Normal file

@@ -0,0 +1,139 @@
---
title: "FAQ"
description: "Rclone Frequently Asked Questions"
date: "2015-08-27"
---
Frequently Asked Questions
--------------------------
### Do all cloud storage systems support all rclone commands ###
Yes they do. All the rclone commands (eg `sync`, `copy` etc) will
work on all the remote storage systems.
### Can I copy the config from one machine to another ###
Sure! Rclone stores all of its config in a single file. If you want
to find this file, the simplest way is to run `rclone -h` and look at
the help for the `--config` flag which will tell you where it is. Eg,
```
$ rclone -h
Sync files and directories to and from local and remote object stores - v1.18.
[snip]
Options:
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
--checkers=8: Number of checkers to run in parallel.
-c, --checksum=false: Skip based on checksum & size, not mod-time & size
--config="/home/user/.rclone.conf": Config file.
[snip]
```
So in this example the config file can be found in
`/home/user/.rclone.conf`.
Just copy that to the equivalent place in the destination (run `rclone
-h` above again on the destination machine if not sure).
### Can rclone sync directly from drive to s3 ###
Rclone can sync between two remote cloud storage systems just fine.
Note that it effectively downloads the file and uploads it again, so
the node running rclone would need to have lots of bandwidth.
The syncs would be incremental (on a file by file basis).
Eg
rclone sync drive:Folder s3:bucket
### Using rclone from multiple locations at the same time ###
You can use rclone from multiple places at the same time if you choose
a different subdirectory for the output, eg
```
Server A> rclone sync /tmp/whatever remote:ServerA
Server B> rclone sync /tmp/whatever remote:ServerB
```
If you sync to the same directory then you should use `rclone copy`,
otherwise the two rclones may delete each other's files, eg
```
Server A> rclone copy /tmp/whatever remote:Backup
Server B> rclone copy /tmp/whatever remote:Backup
```
The file names you upload from Server A and Server B should be
different in this case, otherwise some file systems (eg Drive) may
make duplicates.
### Why doesn't rclone support partial transfers / binary diffs like rsync? ###
Rclone stores each file you transfer as a native object on the remote
cloud storage system. This means that you can see the files you
upload as expected using alternative access methods (eg using the
Google Drive web interface). There is a 1:1 mapping between files on
your hard disk and objects created in the cloud storage system.
None of the cloud storage systems I've come across so far support
partially uploading an object. You can't take an existing object and
change some bytes in the middle of it.
It would be possible to make a sync system which stored binary diffs
instead of whole objects like rclone does, but that would break the
1:1 mapping of files on your hard disk to objects in the remote cloud
storage system.
All the cloud storage systems support partial downloads of content, so
it would be possible to make partial downloads work. However to make
this work efficiently this would require storing a significant amount
of metadata, which breaks the desired 1:1 mapping of files to objects.
### Can rclone do bi-directional sync? ###
No, not at present. rclone only does uni-directional sync from A ->
B. It may do so in the future though, since it has all the primitives - it
just requires writing the algorithm to do it.
### Can I use rclone with an HTTP proxy? ###
Yes. rclone will use the environment variables `HTTP_PROXY`,
`HTTPS_PROXY` and `NO_PROXY`, similar to cURL and other programs.
`HTTPS_PROXY` takes precedence over `HTTP_PROXY` for https requests.
The environment values may be either a complete URL or a "host[:port]",
in which case the "http" scheme is assumed.
`NO_PROXY` allows you to disable the proxy for specific hosts.
Hosts must be comma separated, and can contain domains or parts of
domains. For instance "foo.com" also matches "bar.foo.com".
### Rclone gives x509: failed to load system roots and no roots provided error ###
This means that `rclone` can't find the SSL root certificates. Likely
you are running `rclone` on a NAS with a cut-down Linux OS.
Rclone (via the Go runtime) tries to load the root certificates from
these places on Linux.
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
"/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
"/etc/ssl/ca-bundle.pem", // OpenSUSE
"/etc/pki/tls/cacert.pem", // OpenELEC
So doing something like this should fix the problem. It also sets the
time which is important for SSL to work properly.
```
mkdir -p /etc/ssl/certs/
curl -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
ntpclient -s -h pool.ntp.org
```

docs/content/filtering.md Normal file

@@ -0,0 +1,273 @@
---
title: "Filtering"
description: "Filtering, includes and excludes"
date: "2015-09-27"
---
# Filtering, includes and excludes #
Rclone has a sophisticated set of include and exclude rules. Some of
these are based on patterns and some on other things like file size.
Each path as it passes through rclone is matched against the include
and exclude rules. The paths are matched without a leading `/`.
For example the files might be passed to the matching engine like this
* `file1.jpg`
* `file2.jpg`
* `directory/file3.jpg`
## Patterns ##
The patterns used to match files for inclusion or exclusion are based
on "file globs" as used by the unix shell.
If the pattern starts with a `/` then it only matches at the top level
of the directory tree. If it doesn't start with `/` then it is
matched starting at the end of the path, but it will only match a
complete path element.
file.jpg - matches "file.jpg"
- matches "directory/file.jpg"
- doesn't match "afile.jpg"
- doesn't match "directory/afile.jpg"
/file.jpg - matches "file.jpg"
- doesn't match "afile.jpg"
- doesn't match "directory/file.jpg"
A `*` matches anything but not a `/`.
*.jpg - matches "file.jpg"
- matches "directory/file.jpg"
- doesn't match "file.jpg/anotherfile.png"
Use `**` to match anything, including slashes.
dir/** - matches "dir/file.jpg"
- matches "dir/dir1/dir2/file.jpg"
- doesn't match "directory/file.jpg"
- doesn't match "adir/file.jpg"
A `?` matches any character except a slash `/`.
l?ss - matches "less"
- matches "lass"
- doesn't match "floss"
A `[` and `]` together make a character class, such as `[a-z]` or
`[aeiou]` or `[[:alpha:]]`. See the [go regexp
docs](https://golang.org/pkg/regexp/syntax/) for more info on these.
h[ae]llo - matches "hello"
- matches "hallo"
- doesn't match "hullo"
A `{` and `}` define a choice between elements. It should contain a
comma separated list of patterns, any of which might match. These
patterns can contain wildcards.
{one,two}_potato - matches "one_potato"
- matches "two_potato"
- doesn't match "three_potato"
- doesn't match "_potato"
Special characters can be escaped with a `\` before them.
\*.jpg - matches "*.jpg"
\\.jpg - matches "\.jpg"
\[one\].jpg - matches "[one].jpg"
### Differences between rsync and rclone patterns ###
Rclone implements bash style `{a,b,c}` glob matching which rsync doesn't.
Rclone ignores `/` at the end of a pattern.
Rclone always does a wildcard match so `\` must always escape a `\`.
## How the rules are used ##
Rclone maintains a list of include rules and exclude rules.
Each file is matched in order against the list until it finds a match.
The file is then included or excluded according to the rule type.
If the matcher falls off the bottom of the list then the path is
included.
For example given the following rules, `+` being include, `-` being
exclude,
- secret*.jpg
+ *.jpg
+ *.png
+ file2.avi
- *
This would include
* `file1.jpg`
* `file3.png`
* `file2.avi`
This would exclude
* `secret17.jpg`
* any files which are not `*.jpg` or `*.png`
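The same set of rules could be supplied directly on the command line
with repeated `--filter` flags (a sketch; the flags are described in
the next section and the rules are processed in the order given):
```
rclone sync /home/source remote:backup \
  --filter "- secret*.jpg" --filter "+ *.jpg" \
  --filter "+ *.png" --filter "+ file2.avi" --filter "- *"
```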
## Adding filtering rules ##
Filtering rules are added with the following command line flags.
### `--exclude` - Exclude files matching pattern ###
Add a single exclude rule with `--exclude`.
Eg `--exclude *.bak` to exclude all bak files from the sync.
### `--exclude-from` - Read exclude patterns from file ###
Add exclude rules from a file.
Prepare a file like this `exclude-file.txt`
# a sample exclude rule file
*.bak
file2.jpg
Then use as `--exclude-from exclude-file.txt`. This will sync all
files except those ending in `bak` and `file2.jpg`.
This is useful if you have a lot of rules.
### `--include` - Include files matching pattern ###
Add a single include rule with `--include`.
Eg `--include *.{png,jpg}` to include all `png` and `jpg` files in the
backup and no others.
This adds an implicit `--exclude *` at the end of the filter list.
### `--include-from` - Read include patterns from file ###
Add include rules from a file.
Prepare a file like this `include-file.txt`
# a sample include rule file
*.jpg
*.png
file2.avi
Then use as `--include-from include-file.txt`. This will sync all
`jpg` and `png` files, and `file2.avi`.
This is useful if you have a lot of rules.
This adds an implicit `--exclude *` at the end of the filter list.
### `--filter` - Add a file-filtering rule ###
This can be used to add a single include or exclude rule. Include
rules start with `+ ` and exclude rules start with `- `. A special
rule called `!` can be used to clear the existing rules.
Eg `--filter "- *.bak"` to exclude all bak files from the sync.
### `--filter-from` - Read filtering patterns from a file ###
Add include/exclude rules from a file.
Prepare a file like this `filter-file.txt`
# a sample exclude rule file
- secret*.jpg
+ *.jpg
+ *.png
+ file2.avi
# exclude everything else
- *
Then use as `--filter-from filter-file.txt`. The rules are processed
in the order that they are defined.
This example will include all `jpg` and `png` files, exclude any files
matching `secret*.jpg` and include `file2.avi`. Everything else will
be excluded from the sync.
### `--files-from` - Read list of source-file names ###
This reads a list of file names from the file passed in and **only**
these files are transferred. The filtering rules are ignored
completely if you use this option.
Prepare a file like this `files-from.txt`
# comment
file1.jpg
file2.jpg
Then use as `--files-from files-from.txt`. This will only transfer
`file1.jpg` and `file2.jpg` providing they exist.
### `--min-size` - Don't transfer any file smaller than this ###
This option controls the minimum size file which will be transferred.
The size is assumed to be in `kBytes` by default, but a suffix of
`k`, `M`, or `G` can be used.
For example `--min-size 50k` means no files smaller than 50kByte will be
transferred.
### `--max-size` - Don't transfer any file larger than this ###
This option controls the maximum size file which will be transferred.
The size is assumed to be in `kBytes` by default, but a suffix of
`k`, `M`, or `G` can be used.
For example `--max-size 1G` means no files larger than 1GByte will be
transferred.
### `--delete-excluded` - Delete files on dest excluded from sync ###
**Important** this flag is dangerous - use with `--dry-run` and `-v` first.
When doing `rclone sync` this will delete any files which are excluded
from the sync on the destination.
If for example you did a sync from `A` to `B` without the `--min-size 50k` flag
rclone sync A: B:
Then you repeated it like this with the `--delete-excluded` flag
rclone --min-size 50k --delete-excluded sync A: B:
This would delete all files on `B` which are less than 50 kBytes as
these are now excluded from the sync.
Always test first with `--dry-run` and `-v` before using this flag.
### `--dump-filters` - dump the filters to the output ###
This dumps the defined filters to the output as regular expressions.
Useful for debugging.
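For example (a sketch), to see what a rule compiles to before running a real transfer:

rclone --include "*.jpg" --dump-filters ls remote: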
## Quoting shell metacharacters ##
The examples above may not work verbatim in your shell as they have
shell metacharacters in them (eg `*`), and may require quoting.
Eg linux, OSX
* `--include \*.jpg`
* `--include '*.jpg'`
* `--include='*.jpg'`
In Windows the expansion is done by the command not the shell so this
should work fine
* `--include *.jpg`


@@ -0,0 +1,130 @@
---
title: "Google Cloud Storage"
description: "Rclone docs for Google Cloud Storage"
date: "2015-09-12"
---
<i class="fa fa-google"></i> Google Cloud Storage
-------------------------------------------------
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
command.) You may put subdirectories in too, eg `remote:bucket/path/to/dir`.
The initial setup for google cloud storage involves getting a token from Google Cloud Storage
which you need to do in your browser. `rclone config` walks you
through it.
Here is an example of how to make a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) google cloud storage
5) dropbox
6) drive
type> 4
Google Application Client Id - leave blank normally.
client_id>
Google Application Client Secret - leave blank normally.
client_secret>
Project number optional - needed only for list/create/delete buckets - see your developer console.
project_number> 12345678
Access Control List for new objects.
Choose a number from below, or type in your own value
* Object owner gets OWNER access, and all Authenticated Users get READER access.
1) authenticatedRead
* Object owner gets OWNER access, and project team owners get OWNER access.
2) bucketOwnerFullControl
* Object owner gets OWNER access, and project team owners get READER access.
3) bucketOwnerRead
* Object owner gets OWNER access [default if left blank].
4) private
* Object owner gets OWNER access, and project team members get access according to their roles.
5) projectPrivate
* Object owner gets OWNER access, and all Users get READER access.
6) publicRead
object_acl> 4
Access Control List for new buckets.
Choose a number from below, or type in your own value
* Project team owners get OWNER access, and all Authenticated Users get READER access.
1) authenticatedRead
* Project team owners get OWNER access [default if left blank].
2) private
* Project team members get access according to their roles.
3) projectPrivate
* Project team owners get OWNER access, and all Users get READER access.
4) publicRead
* Project team owners get OWNER access, and all Users get WRITER access.
5) publicReadWrite
bucket_acl> 2
Remote config
Use auto config?
* Say Y if not sure
* Say N if you are working on a remote or headless machine or Y didn't work
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
type = google cloud storage
client_id =
client_secret =
token = {"AccessToken":"xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx","Expiry":"2014-07-17T20:49:14.929208288+01:00","Extra":null}
project_number = 12345678
object_acl = private
bucket_acl = private
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
Note that rclone runs a webserver on your local machine to collect the
token as returned from Google if you use auto config mode. This only
runs from the moment it opens your browser to the moment you get back
the verification code. This is on `http://127.0.0.1:53682/` and
it may require you to unblock it temporarily if you are running a host
firewall, or use manual mode.
This remote is called `remote` and can now be used like this
See all the buckets in your project
rclone lsd remote:
Make a new bucket
rclone mkdir remote:bucket
List the contents of a bucket
rclone ls remote:bucket
Sync `/home/local/directory` to the remote bucket, deleting any excess
files in the bucket.
rclone sync /home/local/directory remote:bucket
### Modified time ###
Google Cloud Storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in
RFC3339 format accurate to 1ns.
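You can inspect the modification times rclone has stored (to the
precision the remote supports) with `rclone lsl`, eg

rclone lsl remote:bucket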

docs/content/install.md Normal file

@@ -0,0 +1,39 @@
---
title: "Install"
description: "Rclone Installation"
date: "2015-06-12"
---
Install
-------
Rclone is a Go program and comes as a single binary file.
[Download](/downloads/) the relevant binary.
Or alternatively if you have Go installed use
go get github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`. If you have built
rclone before then you will want to update its dependencies first with
this (remove `-f` if using go < 1.4)
go get -u -v -f github.com/ncw/rclone/...
See the [Usage section](/docs/) of the docs for how to use rclone, or
run `rclone -h`.
Linux binary install example
-------
unzip rclone-v1.17-linux-amd64.zip
cd rclone-v1.17-linux-amd64
#copy binary file
sudo cp rclone /usr/sbin/
sudo chown root:root /usr/sbin/rclone
sudo chmod 755 /usr/sbin/rclone
#install manpage
sudo mkdir -p /usr/local/share/man/man1
sudo cp rclone.1 /usr/local/share/man/man1/
sudo mandb

docs/content/licence.md Normal file

@@ -0,0 +1,34 @@
---
title: "Licence"
description: "Rclone Licence"
date: "2015-06-06"
---
License
-------
This is free software under the terms of the MIT license (check the
COPYING file included with the source code).
```
Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
```


@@ -4,8 +4,8 @@ description: "Rclone docs for the local filesystem"
date: "2014-04-26"
---
Local Filesystem
----------------
<i class="fa fa-file"></i> Local Filesystem
-------------------------------------------
Local paths are specified as normal filesystem paths, eg `/path/to/wherever`, so
@@ -16,10 +16,23 @@ Will sync `/home/source` to `/tmp/destination`
These can be configured into the config file for consistency's sake,
but it is probably easier not to.
Modified time
-------------
### Modified time ###
Rclone reads and writes the modified time using an accuracy determined by
the OS. Typically this is 1 ns on Linux, 10 ns on Windows and 1 second
on OS X.
### Filenames ###
Filenames are expected to be encoded in UTF-8 on disk. This is the
normal case for Windows and OS X. There is a bit more uncertainty in
the Linux world, but new distributions will have UTF-8 encoded file
names.
If an invalid (non-UTF8) filename is read, the invalid characters will
be replaced with the unicode replacement character, '�'. `rclone`
will emit a debug message in this case (use `-v` to see), eg
```
Local file system at .: Replacing invalid UTF-8 characters in "gro\xdf"
```

docs/content/onedrive.md Normal file

@@ -0,0 +1,111 @@
---
title: "Microsoft One Drive"
description: "Rclone docs for Microsoft One Drive"
date: "2015-10-14"
---
<i class="fa fa-windows"></i> Microsoft One Drive
-----------------------------------------
Paths are specified as `remote:path`
Paths may be as deep as required, eg `remote:directory/subdirectory`.
The initial setup for One Drive involves getting a token from
Microsoft which you need to do in your browser. `rclone config` walks
you through it.
Here is an example of how to make a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) drive
3) dropbox
4) google cloud storage
5) local
6) onedrive
7) s3
8) swift
type> 6
Microsoft App Client Id - leave blank normally.
client_id>
Microsoft App Client Secret - leave blank normally.
client_secret>
Remote config
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
client_id =
client_secret =
token = {"access_token":"XXXXXX"}
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
Note that rclone runs a webserver on your local machine to collect the
token as returned from Microsoft. This only runs from the moment it
opens your browser to the moment you get back the verification
code. This is on `http://127.0.0.1:53682/` and it may require
you to unblock it temporarily if you are running a host firewall.
Once configured you can then use `rclone` like this,
List directories in top level of your One Drive
rclone lsd remote:
List all the files in your One Drive
rclone ls remote:
To copy a local directory to a One Drive directory called backup
rclone copy /home/source remote:backup
### Modified time and MD5SUMs ###
One Drive allows modification times to be set on objects accurate to 1
second. These will be used to detect whether objects need syncing or
not.
One Drive does not support MD5SUMs. This means the `--checksum` flag
will be equivalent to the `--size-only` flag.
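In practice a checksum-based sync to One Drive therefore degrades to
a size comparison, so (as a sketch) these two commands behave
identically here:
```
rclone sync --checksum /home/source remote:backup
rclone sync --size-only /home/source remote:backup
```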
### Deleting files ###
Any files you delete with rclone will end up in the trash. Microsoft
doesn't provide an API to permanently delete files, nor to empty the
trash, so you will have to do that with one of Microsoft's apps or via
the One Drive website.
### Limitations ###
Note that One Drive is case insensitive so you can't have a
file called "Hello.doc" and one called "hello.doc".
Rclone only supports your default One Drive, and doesn't work with One
Drive for business. Both these issues may be fixed at some point
depending on user demand!
There are quite a few characters that can't be in One Drive file
names. These can't occur on Windows platforms, but on non-Windows
platforms they are common. Rclone will map these names to and from an
identical looking unicode equivalent. For example if a file has a `?`
in it, it will be mapped to `？` (a fullwidth question mark) instead.

docs/content/overview.md Normal file

@@ -0,0 +1,72 @@
---
title: "Overview of cloud storage systems"
description: "Overview of cloud storage systems"
type: page
date: "2015-09-06"
---
# Overview of cloud storage systems #
Each cloud storage system is slightly different. Rclone attempts to
provide a unified interface to them, but some underlying differences
show through.
## Features ##
Here is an overview of the major features of each cloud storage system.
| Name | MD5SUM | ModTime | Case Insensitive | Duplicate Files |
| ---------------------- |:-------:|:-------:|:--------------:|:---------------:|
| Google Drive | Yes | Yes | No | Yes |
| Amazon S3 | Yes | Yes | No | No |
| Openstack Swift | Yes | Yes | No | No |
| Dropbox | No | No | Yes | No |
| Google Cloud Storage | Yes | Yes | No | No |
| Amazon Cloud Drive | Yes | No | Yes | No |
| Microsoft One Drive | No | Yes | Yes | No |
| The local filesystem | Yes | Yes | Depends | No |
### MD5SUM ###
The cloud storage system supports MD5SUMs of the objects. This
is used if available when transferring data as an integrity check and
can be specifically used with the `--checksum` flag in syncs and in
the `check` command.
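For example (a sketch), to verify that a source and destination match
using sizes and MD5SUMs where available:

rclone check /home/source remote:backup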
### ModTime ###
The cloud storage system supports setting modification times on
objects. If it does then this enables using the modification times
as part of the sync. If not then only the size will be checked by
default, though the MD5SUM can be checked with the `--checksum` flag.
All cloud storage systems support some kind of date on the object and
these will be set when transferring from the cloud storage system.
### Case Insensitive ###
If a cloud storage system is case sensitive then it is possible to
have two files which differ only in case, eg `file.txt` and
`FILE.txt`. If a cloud storage system is case insensitive then that
isn't possible.
This can cause problems when syncing between a case insensitive
system and a case sensitive system. The symptom of this is that no
matter how many times you run the sync it never completes fully.
The local filesystem may or may not be case sensitive depending on OS.
* Windows - usually case insensitive
* OSX - usually case insensitive, though it is possible to format case sensitive
* Linux - usually case sensitive, but there are case insensitive file systems (eg FAT formatted USB keys)
Most of the time this doesn't cause any problems as people tend to
avoid files whose name differs only by case even on case sensitive
systems.
### Duplicate files ###
If a cloud storage system allows duplicate files then it can have two
objects with the same name.
This confuses rclone greatly when syncing.

docs/content/privacy.md Normal file

@@ -0,0 +1,65 @@
---
title: "Privacy Policy"
description: "Rclone Privacy Policy"
date: "2015-08-19"
---
# Rclone Privacy Policy #
## What is this Privacy Policy for? ##
This privacy policy is for this website http://rclone.org and governs the privacy of its users who choose to use it.
The policy sets out the different areas where user privacy is concerned and outlines the obligations & requirements of the users, the website and website owners. Furthermore the way this website processes, stores and protects user data and information will also be detailed within this policy.
## The Website ##
This website and its owners take a proactive approach to user privacy and ensure the necessary steps are taken to protect the privacy of its users throughout their visiting experience. This website complies with all UK national laws and requirements for user privacy.
## Use of Cookies ##
This website uses cookies to improve the user's experience while visiting the website. Where applicable this website uses a cookie control system allowing the user on their first visit to the website to allow or disallow the use of cookies on their computer / device. This complies with recent legislation requirements for websites to obtain explicit consent from users before leaving behind or reading files such as cookies on a user's computer / device.
Cookies are small files saved to the user's computer's hard drive that track, save and store information about the user's interactions and usage of the website. This allows the website, through its server, to provide the users with a tailored experience within this website.
Users are advised that if they wish to deny the use and saving of cookies from this website onto their computer's hard drive they should take the necessary steps within their web browser's security settings to block all cookies from this website and its external serving vendors.
This website uses tracking software to monitor its visitors to better understand how they use it. This software is provided by Google Analytics which uses cookies to track visitor usage. The software will save a cookie to your computer's hard drive in order to track and monitor your engagement and usage of the website, but will not store, save or collect personal information. You can read [Google's privacy policy here](http://www.google.com/privacy.html) for further information.
Other cookies may be stored on your computer's hard drive by external vendors when this website uses referral programs, sponsored links or adverts. Such cookies are used for conversion and referral tracking and typically expire after 30 days, though some may take longer. No personal information is stored, saved or collected.
## Contact & Communication ##
Users contacting this website and/or its owners do so at their own discretion and provide any such personal details requested at their own risk. Your personal information is kept private and stored securely until a time it is no longer required or has no use, as detailed in the Data Protection Act 1998.
This website and its owners use any information submitted to provide you with further information about the products / services they offer or to assist you in answering any questions or queries you may have submitted.
## External Links ##
Although this website only looks to include quality, safe and relevant external links, users are advised to adopt a policy of caution before clicking any external web links mentioned throughout this website.
The owners of this website cannot guarantee or verify the contents of any externally linked website despite their best efforts. Users should therefore note they click on external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.
## Adverts and Sponsored Links ##
This website may contain sponsored links and adverts. These will typically be served through our advertising partners, who may have detailed privacy policies relating directly to the adverts they serve.
Clicking on any such adverts will send you to the advertiser's website through a referral program which may use cookies and will track the number of referrals sent from this website. This may include the use of cookies which may in turn be saved on your computer's hard drive. Users should therefore note they click on sponsored external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.
## Social Media Platforms ##
Communication, engagement and actions taken through external social media platforms that this website and its owners participate on are subject to the terms and conditions as well as the privacy policies held with each social media platform respectively.
Users are advised to use social media platforms wisely and communicate / engage upon them with due care and caution in regard to their own privacy and personal details. Neither this website nor its owners will ever ask for personal or sensitive information through social media platforms; users wishing to discuss sensitive details are encouraged to contact them through primary communication channels such as email.
This website may use social sharing buttons which help share web content directly from web pages to the social media platform in question. Users are advised before using such social sharing buttons that they do so at their own discretion and note that the social media platform may track and save your request to share a web page respectively through your social media platform account.
## Resources & Further Information ##
* [Data Protection Act 1998](http://www.legislation.gov.uk/ukpga/1998/29/contents)
* [Privacy and Electronic Communications Regulations 2003](http://www.legislation.gov.uk/uksi/2003/2426/contents/made)
* [Privacy and Electronic Communications Regulations 2003 - The Guide](https://ico.org.uk/for-organisations/guide-to-pecr/)
* [Twitter Privacy Policy](http://twitter.com/privacy)
* [Facebook Privacy Policy](http://www.facebook.com/about/privacy/)
* [Google Privacy Policy](http://www.google.com/privacy.html)
* [Sample Website Privacy Policy](http://www.jamieking.co.uk/resources/free_sample_privacy_policy.html)


@@ -4,13 +4,11 @@ description: "Rclone docs for Amazon S3"
date: "2014-04-26"
---
Paths are specified as `remote:bucket` or `remote:`
<i class="fa fa-amazon"></i> Amazon S3
---------------------------------------
S3 paths can't refer to subdirectories within a bucket (yet).
So to copy a local directory to a s3 container called backup
rclone sync /home/source s3:backup
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
command.) You may put subdirectories in too, eg `remote:bucket/path/to/dir`.
Here is an example of making an s3 configuration. First run
@@ -29,41 +27,55 @@ Choose a number from below
1) swift
2) s3
3) local
4) drive
4) google cloud storage
5) dropbox
6) drive
type> 2
AWS Access Key ID.
access_key_id> accesskey
AWS Secret Access Key (password).
secret_access_key> secretaccesskey
Endpoint for S3 API.
Region to connect to.
Choose a number from below, or type in your own value
* The default endpoint - a good choice if you are unsure.
* US Region, Northern Virginia or Pacific Northwest.
* Leave location constraint empty.
1) https://s3.amazonaws.com/
* US Region, Northern Virginia only.
* Leave location constraint empty.
2) https://s3-external-1.amazonaws.com
1) us-east-1
* US West (Oregon) Region
* Needs location constraint us-west-2.
2) us-west-2
[snip]
* South America (Sao Paulo) Region
* Needs location constraint sa-east-1.
9) https://s3-sa-east-1.amazonaws.com
endpoint> 1
Location constraint - must be set to match the Endpoint.
9) sa-east-1
* If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.
10) other-v2-signature
* If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.
11) other-v4-signature
region> 1
Endpoint for S3 API.
Leave blank if using AWS to use the default endpoint for the region.
Specify if using an S3 clone such as Ceph.
endpoint>
Location constraint - must be set to match the Region. Used when creating buckets only.
Choose a number from below, or type in your own value
* Empty for US Region, Northern Virginia or Pacific Northwest.
1)
* US West (Oregon) Region.
2) us-west-2
* US West (Northern California) Region.
3) us-west-1
* EU (Ireland) Region.
4) eu-west-1
[snip]
* South America (Sao Paulo) Region.
9) sa-east-1
location_constraint> 1
Remote config
--------------------
[remote]
access_key_id = accesskey
secret_access_key = secretaccesskey
endpoint = https://s3.amazonaws.com/
region = us-east-1
endpoint =
location_constraint =
--------------------
y) Yes this is OK
@@ -102,8 +114,98 @@ files in the bucket.
rclone sync /home/local/directory remote:bucket
Modified time
-------------
### Modified time ###
The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch accurate to 1 ns.
### Multipart uploads ###
rclone supports multipart uploads with S3 which means that it can
upload files bigger than 5GB. Note that files uploaded with multipart
upload don't have an MD5SUM.
### Buckets and Regions ###
With Amazon S3 you can list buckets (`rclone lsd`) using any region,
but you can only access the content of a bucket from the region it was
created in. If you attempt to access a bucket from the wrong region,
you will get an error, `incorrect region, the bucket is not in 'XXX'
region`.
### Anonymous access to public buckets ###
If you want to use rclone to access a public bucket, configure with a
blank `access_key_id` and `secret_access_key`. Eg
```
e) Edit existing remote
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> anons3
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) drive
3) dropbox
4) google cloud storage
5) local
6) s3
7) swift
type> 6
AWS Access Key ID - leave blank for anonymous access.
access_key_id>
AWS Secret Access Key (password) - leave blank for anonymous access.
secret_access_key>
Region to connect to.
region> 1
endpoint>
location_constraint>
```
Then use it as normal with the name of the public bucket, eg
rclone lsd anons3:1000genomes
You will be able to list and copy data but not upload it.
### Ceph ###
Ceph is an object storage system which presents an Amazon S3 interface.
To use rclone with ceph, you need to set the following parameters in
the config.
```
access_key_id = Whatever
secret_access_key = Whatever
endpoint = https://ceph.endpoint.goes.here/
region = other-v2-signature
```
Note also that Ceph sometimes puts `/` in the passwords it gives
users. If you read the secret access key using the command line tools
you will get a JSON blob with the `/` escaped as `\/`. Make sure you
only write `/` in the secret access key.
Eg the dump from Ceph looks something like this (irrelevant keys
removed).
```
{
"user_id": "xxx",
"display_name": "xxxx",
"keys": [
{
"user": "xxx",
"access_key": "xxxxxx",
"secret_key": "xxxxxx\/xxxx"
}
],
}
```
Because this is a json dump, it is encoding the `/` as `\/`, so if you
use the secret key as `xxxxxx/xxxx` it will work fine.


@@ -4,13 +4,17 @@ description: "Swift"
date: "2014-04-26"
---
<i class="fa fa-space-shuttle"></i> Swift
----------------------------------------
Swift refers to [Openstack Object Storage](http://www.openstack.org/software/openstack-storage/).
Commercial implementations of that being:
* [Rackspace Cloud Files](http://www.rackspace.com/cloud/files/)
* [Memset Memstore](http://www.memset.com/cloud/storage/)
Paths are specified as `remote:container` or `remote:`
Paths are specified as `remote:container` (or `remote:` for the `lsd`
command.) You may put subdirectories in too, eg `remote:container/path/to/dir`.
Here is an example of making a swift configuration. First run
@@ -48,12 +52,15 @@ Choose a number from below, or type in your own value
* Memset Memstore UK v2
5) https://auth.storage.memset.com/v2.0
auth> 1
Tenant name - optional
tenant>
Remote config
--------------------
[remote]
user = user_name
key = password_or_api_key
auth = https://auth.api.rackspacecloud.com/v1.0
tenant =
--------------------
y) Yes this is OK
e) Edit this remote
@@ -80,8 +87,7 @@ excess files in the container.
rclone sync /home/local/directory remote:container
Modified time
-------------
### Modified time ###
The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point since the epoch accurate to 1


@@ -3,6 +3,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="{{ .Description }}">
<meta name="author" content="Nick Craig-Wood">
<link rel="shortcut icon" type="image/png" href="/img/rclone-16x16.png"/>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),


@@ -12,17 +12,36 @@
<div class="collapse navbar-collapse navbar-ex1-collapse">
<ul class="nav navbar-nav">
<li><a href="/downloads/"><i class="fa fa-cloud-download"></i> Downloads</a></li>
<li><a href="/docs/"><i class="fa fa-book"></i> Docs</a></li>
<li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Docs</a>
<ul class="dropdown-menu">
<li><a href="/install/"><i class="fa fa-book"></i> Installation</a></li>
<li><a href="/docs/"><i class="fa fa-book"></i> Usage</a></li>
<li><a href="/filtering/"><i class="fa fa-book"></i> Filtering</a></li>
<li><a href="/changelog/"><i class="fa fa-book"></i> Changelog</a></li>
<li><a href="/bugs/"><i class="fa fa-book"></i> Bugs</a></li>
<li><a href="/faq/"><i class="fa fa-book"></i> FAQ</a></li>
<li><a href="/licence/"><i class="fa fa-book"></i> Licence</a></li>
<li><a href="/authors/"><i class="fa fa-book"></i> Authors</a></li>
<li><a href="/donate/"><i class="fa fa-book"></i> Donate</a></li>
<li><a href="/privacy/"><i class="fa fa-book"></i> Privacy Policy</a></li>
</ul>
</li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Storage Systems</a>
<ul class="dropdown-menu">
<li><a href="/overview/"><i class="fa fa-archive"></i> Overview</a></li>
<li><a href="/drive/"><i class="fa fa-google"></i> Drive</a></li>
<li><a href="/s3/"><i class="fa fa-archive"></i> S3</a></li>
<li><a href="/s3/"><i class="fa fa-amazon"></i> S3</a></li>
<li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Swift</a></li>
<li><a href="/dropbox/"><i class="fa fa-dropbox"></i> Dropbox</a></li>
<li><a href="/googlecloudstorage/"><i class="fa fa-google"></i> Google Cloud Storage</a></li>
<li><a href="/amazonclouddrive/"><i class="fa fa-amazon"></i> Amazon Cloud Drive</a></li>
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft One Drive</a></li>
<li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
</ul>
</li>
<li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
</ul>
</div>
</div>


@@ -4,4 +4,23 @@ body {
footer {
margin: 50px 0;
}
}
table {
background-color:#e0e0ff
}
tbody td, th {
border: 1px solid black;
padding: 3px 7px 2px 7px;
}
thead td, th {
border: 1px solid black;
padding: 3px 7px 2px 7px;
font-weight: bold;
}
tbody tr:nth-child(odd) {
background-color:#d0d0ff
}


@@ -1,22 +1,21 @@
/*!
* Font Awesome 4.1.0 by @davegandy - http://fontawesome.io - @fontawesome
* Font Awesome 4.4.0 by @davegandy - http://fontawesome.io - @fontawesome
* License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
*/
/* FONT PATH
* -------------------------- */
@font-face {
font-family: 'FontAwesome';
src: url('../fonts/fontawesome-webfont.eot?v=4.1.0');
src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.1.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff?v=4.1.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.1.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.1.0#fontawesomeregular') format('svg');
src: url('../fonts/fontawesome-webfont.eot?v=4.4.0');
src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.4.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff2?v=4.4.0') format('woff2'), url('../fonts/fontawesome-webfont.woff?v=4.4.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.4.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.4.0#fontawesomeregular') format('svg');
font-weight: normal;
font-style: normal;
}
.fa {
display: inline-block;
font-family: FontAwesome;
font-style: normal;
font-weight: normal;
line-height: 1;
font: normal normal normal 14px/1 FontAwesome;
font-size: inherit;
text-rendering: auto;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
@@ -65,6 +64,19 @@
border: solid 0.08em #eeeeee;
border-radius: .1em;
}
.fa-pull-left {
float: left;
}
.fa-pull-right {
float: right;
}
.fa.fa-pull-left {
margin-right: .3em;
}
.fa.fa-pull-right {
margin-left: .3em;
}
/* Deprecated as of 4.4.0 */
.pull-right {
float: right;
}
@@ -78,36 +90,24 @@
margin-left: .3em;
}
.fa-spin {
-webkit-animation: spin 2s infinite linear;
-moz-animation: spin 2s infinite linear;
-o-animation: spin 2s infinite linear;
animation: spin 2s infinite linear;
-webkit-animation: fa-spin 2s infinite linear;
animation: fa-spin 2s infinite linear;
}
@-moz-keyframes spin {
0% {
-moz-transform: rotate(0deg);
}
100% {
-moz-transform: rotate(359deg);
}
.fa-pulse {
-webkit-animation: fa-spin 1s infinite steps(8);
animation: fa-spin 1s infinite steps(8);
}
@-webkit-keyframes spin {
@-webkit-keyframes fa-spin {
0% {
-webkit-transform: rotate(0deg);
transform: rotate(0deg);
}
100% {
-webkit-transform: rotate(359deg);
transform: rotate(359deg);
}
}
@-o-keyframes spin {
0% {
-o-transform: rotate(0deg);
}
100% {
-o-transform: rotate(359deg);
}
}
@keyframes spin {
@keyframes fa-spin {
0% {
-webkit-transform: rotate(0deg);
transform: rotate(0deg);
@@ -120,43 +120,40 @@
.fa-rotate-90 {
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);
-webkit-transform: rotate(90deg);
-moz-transform: rotate(90deg);
-ms-transform: rotate(90deg);
-o-transform: rotate(90deg);
transform: rotate(90deg);
}
.fa-rotate-180 {
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2);
-webkit-transform: rotate(180deg);
-moz-transform: rotate(180deg);
-ms-transform: rotate(180deg);
-o-transform: rotate(180deg);
transform: rotate(180deg);
}
.fa-rotate-270 {
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);
-webkit-transform: rotate(270deg);
-moz-transform: rotate(270deg);
-ms-transform: rotate(270deg);
-o-transform: rotate(270deg);
transform: rotate(270deg);
}
.fa-flip-horizontal {
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);
-webkit-transform: scale(-1, 1);
-moz-transform: scale(-1, 1);
-ms-transform: scale(-1, 1);
-o-transform: scale(-1, 1);
transform: scale(-1, 1);
}
.fa-flip-vertical {
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);
-webkit-transform: scale(1, -1);
-moz-transform: scale(1, -1);
-ms-transform: scale(1, -1);
-o-transform: scale(1, -1);
transform: scale(1, -1);
}
:root .fa-rotate-90,
:root .fa-rotate-180,
:root .fa-rotate-270,
:root .fa-flip-horizontal,
:root .fa-flip-vertical {
filter: none;
}
.fa-stack {
position: relative;
display: inline-block;
@@ -222,6 +219,8 @@
.fa-check:before {
content: "\f00c";
}
.fa-remove:before,
.fa-close:before,
.fa-times:before {
content: "\f00d";
}
@@ -551,7 +550,8 @@
.fa-arrows-h:before {
content: "\f07e";
}
.fa-bar-chart-o:before {
.fa-bar-chart-o:before,
.fa-bar-chart:before {
content: "\f080";
}
.fa-twitter-square:before {
@@ -627,6 +627,7 @@
.fa-twitter:before {
content: "\f099";
}
.fa-facebook-f:before,
.fa-facebook:before {
content: "\f09a";
}
@@ -639,6 +640,7 @@
.fa-credit-card:before {
content: "\f09d";
}
.fa-feed:before,
.fa-rss:before {
content: "\f09e";
}
@@ -1276,7 +1278,8 @@
.fa-male:before {
content: "\f183";
}
.fa-gittip:before {
.fa-gittip:before,
.fa-gratipay:before {
content: "\f184";
}
.fa-sun-o:before {
@@ -1380,7 +1383,6 @@
.fa-digg:before {
content: "\f1a6";
}
.fa-pied-piper-square:before,
.fa-pied-piper:before {
content: "\f1a7";
}
@@ -1497,6 +1499,7 @@
content: "\f1cc";
}
.fa-life-bouy:before,
.fa-life-buoy:before,
.fa-life-saver:before,
.fa-support:before,
.fa-life-ring:before {
@@ -1519,6 +1522,8 @@
.fa-git:before {
content: "\f1d3";
}
.fa-y-combinator-square:before,
.fa-yc-square:before,
.fa-hacker-news:before {
content: "\f1d4";
}
@@ -1564,3 +1569,458 @@
.fa-bomb:before {
content: "\f1e2";
}
.fa-soccer-ball-o:before,
.fa-futbol-o:before {
content: "\f1e3";
}
.fa-tty:before {
content: "\f1e4";
}
.fa-binoculars:before {
content: "\f1e5";
}
.fa-plug:before {
content: "\f1e6";
}
.fa-slideshare:before {
content: "\f1e7";
}
.fa-twitch:before {
content: "\f1e8";
}
.fa-yelp:before {
content: "\f1e9";
}
.fa-newspaper-o:before {
content: "\f1ea";
}
.fa-wifi:before {
content: "\f1eb";
}
.fa-calculator:before {
content: "\f1ec";
}
.fa-paypal:before {
content: "\f1ed";
}
.fa-google-wallet:before {
content: "\f1ee";
}
.fa-cc-visa:before {
content: "\f1f0";
}
.fa-cc-mastercard:before {
content: "\f1f1";
}
.fa-cc-discover:before {
content: "\f1f2";
}
.fa-cc-amex:before {
content: "\f1f3";
}
.fa-cc-paypal:before {
content: "\f1f4";
}
.fa-cc-stripe:before {
content: "\f1f5";
}
.fa-bell-slash:before {
content: "\f1f6";
}
.fa-bell-slash-o:before {
content: "\f1f7";
}
.fa-trash:before {
content: "\f1f8";
}
.fa-copyright:before {
content: "\f1f9";
}
.fa-at:before {
content: "\f1fa";
}
.fa-eyedropper:before {
content: "\f1fb";
}
.fa-paint-brush:before {
content: "\f1fc";
}
.fa-birthday-cake:before {
content: "\f1fd";
}
.fa-area-chart:before {
content: "\f1fe";
}
.fa-pie-chart:before {
content: "\f200";
}
.fa-line-chart:before {
content: "\f201";
}
.fa-lastfm:before {
content: "\f202";
}
.fa-lastfm-square:before {
content: "\f203";
}
.fa-toggle-off:before {
content: "\f204";
}
.fa-toggle-on:before {
content: "\f205";
}
.fa-bicycle:before {
content: "\f206";
}
.fa-bus:before {
content: "\f207";
}
.fa-ioxhost:before {
content: "\f208";
}
.fa-angellist:before {
content: "\f209";
}
.fa-cc:before {
content: "\f20a";
}
.fa-shekel:before,
.fa-sheqel:before,
.fa-ils:before {
content: "\f20b";
}
.fa-meanpath:before {
content: "\f20c";
}
.fa-buysellads:before {
content: "\f20d";
}
.fa-connectdevelop:before {
content: "\f20e";
}
.fa-dashcube:before {
content: "\f210";
}
.fa-forumbee:before {
content: "\f211";
}
.fa-leanpub:before {
content: "\f212";
}
.fa-sellsy:before {
content: "\f213";
}
.fa-shirtsinbulk:before {
content: "\f214";
}
.fa-simplybuilt:before {
content: "\f215";
}
.fa-skyatlas:before {
content: "\f216";
}
.fa-cart-plus:before {
content: "\f217";
}
.fa-cart-arrow-down:before {
content: "\f218";
}
.fa-diamond:before {
content: "\f219";
}
.fa-ship:before {
content: "\f21a";
}
.fa-user-secret:before {
content: "\f21b";
}
.fa-motorcycle:before {
content: "\f21c";
}
.fa-street-view:before {
content: "\f21d";
}
.fa-heartbeat:before {
content: "\f21e";
}
.fa-venus:before {
content: "\f221";
}
.fa-mars:before {
content: "\f222";
}
.fa-mercury:before {
content: "\f223";
}
.fa-intersex:before,
.fa-transgender:before {
content: "\f224";
}
.fa-transgender-alt:before {
content: "\f225";
}
.fa-venus-double:before {
content: "\f226";
}
.fa-mars-double:before {
content: "\f227";
}
.fa-venus-mars:before {
content: "\f228";
}
.fa-mars-stroke:before {
content: "\f229";
}
.fa-mars-stroke-v:before {
content: "\f22a";
}
.fa-mars-stroke-h:before {
content: "\f22b";
}
.fa-neuter:before {
content: "\f22c";
}
.fa-genderless:before {
content: "\f22d";
}
.fa-facebook-official:before {
content: "\f230";
}
.fa-pinterest-p:before {
content: "\f231";
}
.fa-whatsapp:before {
content: "\f232";
}
.fa-server:before {
content: "\f233";
}
.fa-user-plus:before {
content: "\f234";
}
.fa-user-times:before {
content: "\f235";
}
.fa-hotel:before,
.fa-bed:before {
content: "\f236";
}
.fa-viacoin:before {
content: "\f237";
}
.fa-train:before {
content: "\f238";
}
.fa-subway:before {
content: "\f239";
}
.fa-medium:before {
content: "\f23a";
}
.fa-yc:before,
.fa-y-combinator:before {
content: "\f23b";
}
.fa-optin-monster:before {
content: "\f23c";
}
.fa-opencart:before {
content: "\f23d";
}
.fa-expeditedssl:before {
content: "\f23e";
}
.fa-battery-4:before,
.fa-battery-full:before {
content: "\f240";
}
.fa-battery-3:before,
.fa-battery-three-quarters:before {
content: "\f241";
}
.fa-battery-2:before,
.fa-battery-half:before {
content: "\f242";
}
.fa-battery-1:before,
.fa-battery-quarter:before {
content: "\f243";
}
.fa-battery-0:before,
.fa-battery-empty:before {
content: "\f244";
}
.fa-mouse-pointer:before {
content: "\f245";
}
.fa-i-cursor:before {
content: "\f246";
}
.fa-object-group:before {
content: "\f247";
}
.fa-object-ungroup:before {
content: "\f248";
}
.fa-sticky-note:before {
content: "\f249";
}
.fa-sticky-note-o:before {
content: "\f24a";
}
.fa-cc-jcb:before {
content: "\f24b";
}
.fa-cc-diners-club:before {
content: "\f24c";
}
.fa-clone:before {
content: "\f24d";
}
.fa-balance-scale:before {
content: "\f24e";
}
.fa-hourglass-o:before {
content: "\f250";
}
.fa-hourglass-1:before,
.fa-hourglass-start:before {
content: "\f251";
}
.fa-hourglass-2:before,
.fa-hourglass-half:before {
content: "\f252";
}
.fa-hourglass-3:before,
.fa-hourglass-end:before {
content: "\f253";
}
.fa-hourglass:before {
content: "\f254";
}
.fa-hand-grab-o:before,
.fa-hand-rock-o:before {
content: "\f255";
}
.fa-hand-stop-o:before,
.fa-hand-paper-o:before {
content: "\f256";
}
.fa-hand-scissors-o:before {
content: "\f257";
}
.fa-hand-lizard-o:before {
content: "\f258";
}
.fa-hand-spock-o:before {
content: "\f259";
}
.fa-hand-pointer-o:before {
content: "\f25a";
}
.fa-hand-peace-o:before {
content: "\f25b";
}
.fa-trademark:before {
content: "\f25c";
}
.fa-registered:before {
content: "\f25d";
}
.fa-creative-commons:before {
content: "\f25e";
}
.fa-gg:before {
content: "\f260";
}
.fa-gg-circle:before {
content: "\f261";
}
.fa-tripadvisor:before {
content: "\f262";
}
.fa-odnoklassniki:before {
content: "\f263";
}
.fa-odnoklassniki-square:before {
content: "\f264";
}
.fa-get-pocket:before {
content: "\f265";
}
.fa-wikipedia-w:before {
content: "\f266";
}
.fa-safari:before {
content: "\f267";
}
.fa-chrome:before {
content: "\f268";
}
.fa-firefox:before {
content: "\f269";
}
.fa-opera:before {
content: "\f26a";
}
.fa-internet-explorer:before {
content: "\f26b";
}
.fa-tv:before,
.fa-television:before {
content: "\f26c";
}
.fa-contao:before {
content: "\f26d";
}
.fa-500px:before {
content: "\f26e";
}
.fa-amazon:before {
content: "\f270";
}
.fa-calendar-plus-o:before {
content: "\f271";
}
.fa-calendar-minus-o:before {
content: "\f272";
}
.fa-calendar-times-o:before {
content: "\f273";
}
.fa-calendar-check-o:before {
content: "\f274";
}
.fa-industry:before {
content: "\f275";
}
.fa-map-pin:before {
content: "\f276";
}
.fa-map-signs:before {
content: "\f277";
}
.fa-map-o:before {
content: "\f278";
}
.fa-map:before {
content: "\f279";
}
.fa-commenting:before {
content: "\f27a";
}
.fa-commenting-o:before {
content: "\f27b";
}
.fa-houzz:before {
content: "\f27c";
}
.fa-vimeo:before {
content: "\f27d";
}
.fa-black-tie:before {
content: "\f27e";
}
.fa-fonticons:before {
content: "\f280";
}

Binary files changed (contents not shown):

docs/static/fonts/fontawesome-webfont.eot vendored Executable file → Normal file
docs/static/fonts/fontawesome-webfont.svg vendored Executable file → Normal file (diff suppressed because it is too large; 248 KiB before, 348 KiB after)
docs/static/fonts/fontawesome-webfont.ttf vendored Executable file → Normal file
docs/static/fonts/fontawesome-webfont.woff vendored Executable file → Normal file
docs/static/img/rclone-16x16.png vendored Normal file (new, 1019 B)

Two further binary files changed whose names are not shown in this view.

File diff suppressed because it is too large.

drive/drive_test.go Normal file

@@ -0,0 +1,56 @@
// Test Drive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package drive_test
import (
"testing"
"github.com/ncw/rclone/drive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func init() {
fstests.NilObject = fs.Object((*drive.Object)(nil))
fstests.RemoteName = "TestDrive:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

247
drive/upload.go Normal file

@@ -0,0 +1,247 @@
// Upload for drive
//
// Docs
// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
// Files insert: https://developers.google.com/drive/v2/reference/files/insert
// Files update: https://developers.google.com/drive/v2/reference/files/update
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
package drive
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/ncw/rclone/fs"
"google.golang.org/api/drive/v2"
"google.golang.org/api/googleapi"
)
const (
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
statusResumeIncomplete = 308
// Number of times to try each chunk
maxTries = 10
)
// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
f *Fs
remote string
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
URI string
// Media is the object being uploaded.
Media io.Reader
// MediaType defines the media type, e.g. "image/jpeg".
MediaType string
// ContentLength is the full size of the object being uploaded.
ContentLength int64
// Return value
ret *drive.File
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) {
fileID := info.Id
var body io.Reader
body, err := googleapi.WithoutDataWrapper.JSONReader(info)
if err != nil {
return nil, err
}
params := make(url.Values)
params.Set("alt", "json")
params.Set("uploadType", "resumable")
urls := "https://www.googleapis.com/upload/drive/v2/files"
method := "POST"
if fileID != "" {
params.Set("setModifiedDate", "true")
urls += "/{fileId}"
method = "PUT"
}
urls += "?" + params.Encode()
req, _ := http.NewRequest(method, urls, body)
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
req.Header.Set("X-Upload-Content-Type", contentType)
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
req.Header.Set("User-Agent", fs.UserAgent)
var res *http.Response
err = f.pacer.Call(func() (bool, error) {
res, err = f.client.Do(req)
if err == nil {
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return shouldRetry(err)
})
if err != nil {
return nil, err
}
loc := res.Header.Get("Location")
rx := &resumableUpload{
f: f,
remote: remote,
URI: loc,
Media: in,
MediaType: contentType,
ContentLength: size,
}
return rx.Upload()
}
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(start int64, body []byte) *http.Request {
reqSize := int64(len(body))
req, _ := http.NewRequest("POST", rx.URI, bytes.NewBuffer(body))
req.ContentLength = reqSize
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
} else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
}
req.Header.Set("Content-Type", rx.MediaType)
req.Header.Set("User-Agent", fs.UserAgent)
return req
}
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
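As a worked example of how the status parsing below uses this regexp: a 308 response whose Range header reads 0-4095 means bytes 0 through 4095 are stored, so the upload resumes at byte 4096. A minimal standalone sketch (the header value is illustrative):

package main

import (
	"fmt"
	"regexp"
)

var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)

func main() {
	// "0-4095" stands in for a server transfer-status response
	if m := rangeRE.FindStringSubmatch("0-4095"); len(m) == 2 {
		fmt.Println(m[1]) // "4095" -> resume the upload at byte 4096
	}
}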
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, fmt.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, fmt.Errorf("unable to parse range %q", Range)
}
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, body []byte) (int, error) {
req := rx.makeRequest(start, body)
res, err := rx.f.client.Do(req)
if err != nil {
return 599, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == statusResumeIncomplete {
return res.StatusCode, nil
}
err = googleapi.CheckResponse(res)
if err != nil {
return res.StatusCode, err
}
// When the entire file upload is complete, the server
// responds with an HTTP 201 Created along with any metadata
// associated with this resource. If this request had been
// updating an existing entity rather than creating a new one,
// the HTTP response code for a completed upload would have
// been 200 OK.
//
// So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionally on
// StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err
}
return res.StatusCode, nil
}
// Upload uploads the chunks from the input
// It retries each chunk up to maxTries times, pausing between attempts.
func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
buf := make([]byte, chunkSize)
var StatusCode int
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
} else {
buf = buf[:reqSize]
}
// Read the chunk
_, err := io.ReadFull(rx.Media, buf)
if err != nil {
return nil, err
}
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debug(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(start, buf)
again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil
}
return again, err
})
if err != nil {
return nil, err
}
start += reqSize
}
// Resume or retry uploads that fail due to connection interruptions or
// any 5xx errors, including:
//
// 500 Internal Server Error
// 502 Bad Gateway
// 503 Service Unavailable
// 504 Gateway Timeout
//
// Use an exponential backoff strategy if any 5xx server error is
// returned when resuming or retrying upload requests. These errors can
// occur if a server is getting overloaded. Exponential backoff can help
// alleviate these kinds of problems during periods of high volume of
// requests or heavy network traffic. Other kinds of requests should not
// be handled by exponential backoff but you can still retry a number of
// them. When retrying these requests, limit the number of times you
// retry them. For example your code could limit to ten retries or less
// before reporting an error.
//
// Handle 404 Not Found errors when doing resumable uploads by starting
// the entire upload over from the beginning.
if rx.ret == nil {
return nil, fs.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
}
return rx.ret, nil
}
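To make the Content-Range bookkeeping above concrete, this standalone sketch prints the header values makeRequest would generate (toy sizes for illustration, not the real chunkSize):

package main

import "fmt"

// Prints the Content-Range values for a 10 byte upload sent in 4 byte chunks.
func main() {
	const total, chunk = int64(10), int64(4)
	for start := int64(0); start < total; start += chunk {
		size := chunk
		if total-start < chunk {
			size = total - start
		}
		fmt.Printf("bytes %v-%v/%v\n", start, start+size-1, total)
	}
	// bytes 0-3/10
	// bytes 4-7/10
	// bytes 8-9/10
}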

682
dropbox/dropbox.go Normal file

@@ -0,0 +1,682 @@
// Package dropbox provides an interface to Dropbox object storage
package dropbox
/*
Limitations of dropbox
File system is case insensitive
*/
import (
"crypto/md5"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"path"
"regexp"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/spf13/pflag"
"github.com/stacktic/dropbox"
)
// Constants
const (
rcloneAppKey = "5jcck7diasz0rqy"
rcloneAppSecret = "m8WRxJ6b1Z/Y25fDwJWS"
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
)
var (
// A regexp matching path names for files Dropbox ignores
// See https://www.dropbox.com/en/help/145 - Ignored files
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
// Upload chunk size - setting it too small makes uploads slow.
// Chunks aren't buffered in memory, so it can safely be set large.
uploadChunkSize = fs.SizeSuffix(128 * 1024 * 1024)
maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "dropbox",
NewFs: NewFs,
Config: configHelper,
Options: []fs.Option{{
Name: "app_key",
Help: "Dropbox App Key - leave blank normally.",
}, {
Name: "app_secret",
Help: "Dropbox App Secret - leave blank normally.",
}},
})
pflag.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}
// Configuration helper - called after the user has put in the defaults
func configHelper(name string) {
// See if already have a token
token := fs.ConfigFile.MustValue(name, "token")
if token != "" {
fmt.Printf("Already have a dropbox token - refresh?\n")
if !fs.Confirm() {
return
}
}
// Get a dropbox
db, err := newDropbox(name)
if err != nil {
log.Fatalf("Failed to create dropbox client: %v", err)
}
// This method will ask the user to visit an URL and paste the generated code.
if err := db.Auth(); err != nil {
log.Fatalf("Failed to authorize: %v", err)
}
// Get the token
token = db.AccessToken()
// Stuff it in the config file if it has changed
old := fs.ConfigFile.MustValue(name, "token")
if token != old {
fs.ConfigFile.SetValue(name, "token", token)
fs.SaveConfig()
}
}
// Fs represents a remote dropbox server
type Fs struct {
name string // name of this remote
db *dropbox.Dropbox // the connection to the dropbox server
root string // the path we are working on
slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase
}
// Object describes a dropbox object
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
bytes int64 // size of the object
modTime time.Time // time it was last modified
hasMetadata bool // metadata is valid
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Dropbox root '%s'", f.root)
}
// Makes a new dropbox from the config
func newDropbox(name string) (*dropbox.Dropbox, error) {
db := dropbox.NewDropbox()
appKey := fs.ConfigFile.MustValue(name, "app_key")
if appKey == "" {
appKey = rcloneAppKey
}
appSecret := fs.ConfigFile.MustValue(name, "app_secret")
if appSecret == "" {
appSecret = fs.Reveal(rcloneAppSecret)
}
err := db.SetAppInfo(appKey, appSecret)
return db, err
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
if uploadChunkSize > maxUploadChunkSize {
return nil, fmt.Errorf("Chunk size too big, must be < %v", maxUploadChunkSize)
}
db, err := newDropbox(name)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
db: db,
}
f.setRoot(root)
// Read the token from the config file
token := fs.ConfigFile.MustValue(name, "token")
// Set our custom context which enables our custom transport for timeouts etc
db.SetContext(oauthutil.Context())
// Authorize the client
db.SetAccessToken(token)
// See if the root is actually an object
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
if err == nil && !entry.IsDir {
remote := path.Base(f.root)
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
}
return f, nil
}
// Sets root in f
func (f *Fs) setRoot(root string) {
f.root = strings.Trim(root, "/")
lowerCaseRoot := strings.ToLower(f.root)
f.slashRoot = "/" + lowerCaseRoot
f.slashRootSlash = f.slashRoot
if lowerCaseRoot != "" {
f.slashRootSlash += "/"
}
}
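A quick illustration of the normalisation setRoot performs (the root path is a hypothetical example):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical root "/Photos/2015/" as a user might type it.
	root := strings.Trim("/Photos/2015/", "/") // "Photos/2015"
	slashRoot := "/" + strings.ToLower(root)   // "/photos/2015"
	slashRootSlash := slashRoot
	if root != "" {
		slashRootSlash += "/" // "/photos/2015/"
	}
	fmt.Println(root, slashRoot, slashRootSlash)
}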
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *Fs) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
o.setMetadataFromEntry(info)
} else {
err := o.readEntryAndSetMetadata()
if err != nil {
// logged already fs.Debug("Failed to read info: %s", err)
return nil
}
}
return o
}
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *Fs) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}
// Strips the root off path and returns it
func (f *Fs) stripRoot(path string) *string {
lowercase := strings.ToLower(path)
if !strings.HasPrefix(lowercase, f.slashRootSlash) {
fs.Stats.Error()
fs.ErrorLog(f, "Path '%s' is not under root '%s'", path, f.slashRootSlash)
return nil
}
stripped := path[len(f.slashRootSlash):]
return &stripped
}
// Walk the root returning a channel of FsObjects
func (f *Fs) list(out fs.ObjectsChan) {
// Track path component case; it can differ for entries coming from the Dropbox API
// See https://www.dropboxforum.com/hc/communities/public/questions/201665409-Wrong-character-case-of-folder-name-when-calling-listFolder-using-Sync-API?locale=en-us
// and https://github.com/ncw/rclone/issues/53
nameTree := newNameTree()
cursor := ""
for {
deltaPage, err := f.db.Delta(cursor, f.slashRoot)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list: %s", err)
break
} else {
if deltaPage.Reset && cursor != "" {
fs.ErrorLog(f, "Unexpected reset during listing - try again")
fs.Stats.Error()
break
}
fs.Debug(f, "%d delta entries received", len(deltaPage.Entries))
for i := range deltaPage.Entries {
deltaEntry := &deltaPage.Entries[i]
entry := deltaEntry.Entry
if entry == nil {
// This notifies of a deleted object
} else {
if len(entry.Path) <= 1 || entry.Path[0] != '/' {
fs.Stats.Error()
fs.ErrorLog(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
continue
}
lastSlashIndex := strings.LastIndex(entry.Path, "/")
var parentPath string
if lastSlashIndex == 0 {
parentPath = ""
} else {
parentPath = entry.Path[1:lastSlashIndex]
}
lastComponent := entry.Path[lastSlashIndex+1:]
if entry.IsDir {
nameTree.PutCaseCorrectDirectoryName(parentPath, lastComponent)
} else {
parentPathCorrectCase := nameTree.GetPathWithCorrectCase(parentPath)
if parentPathCorrectCase != nil {
path := f.stripRoot(*parentPathCorrectCase + "/" + lastComponent)
if path == nil {
// an error occurred and logged by stripRoot
continue
}
out <- f.newFsObjectWithInfo(*path, entry)
} else {
nameTree.PutFile(parentPath, lastComponent, entry)
}
}
}
}
if !deltaPage.HasMore {
break
}
cursor = deltaPage.Cursor.Cursor
}
}
walkFunc := func(caseCorrectFilePath string, entry *dropbox.Entry) {
path := f.stripRoot("/" + caseCorrectFilePath)
if path == nil {
// an error occurred and logged by stripRoot
return
}
out <- f.newFsObjectWithInfo(*path, entry)
}
nameTree.WalkFiles(f.root, walkFunc)
}
// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
defer close(out)
f.list(out)
}()
return out
}
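Since List returns a buffered channel which the listing goroutine closes when done, callers can simply range over it. A minimal consumer sketch (the listAll helper is hypothetical):

package example

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

// listAll prints each object a remote yields; the loop ends when the
// listing goroutine closes the channel.
func listAll(f fs.Fs) {
	for o := range f.List() {
		fmt.Printf("%s (%d bytes)\n", o.Remote(), o.Size())
	}
}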
// ListDir walks the path returning a channel of FsObjects
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
entry, err := f.db.Metadata(f.root, true, false, "", "", metadataLimit)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list directories in root: %s", err)
} else {
for i := range entry.Contents {
entry := &entry.Contents[i]
if entry.IsDir {
name := f.stripRoot(entry.Path)
if name == nil {
// an error occurred and logged by stripRoot
continue
}
out <- &fs.Dir{
Name: *name,
When: time.Time(entry.ClientMtime),
Bytes: entry.Bytes,
Count: -1,
}
}
}
}
}()
return out
}
// A read closer which doesn't close the input
type readCloser struct {
in io.Reader
}
// Read bytes from the object - see io.Reader
func (rc *readCloser) Read(p []byte) (n int, err error) {
return rc.in.Read(p)
}
// Dummy close function
func (rc *readCloser) Close() error {
return nil
}
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
return o, o.Update(in, modTime, size)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir() error {
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
if err == nil {
if entry.IsDir {
return nil
}
return fmt.Errorf("%q already exists as file", f.root)
}
_, err = f.db.CreateFolder(f.slashRoot)
return err
}
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir() error {
entry, err := f.db.Metadata(f.slashRoot, true, false, "", "", 16)
if err != nil {
return err
}
if len(entry.Contents) != 0 {
return errors.New("Directory not empty")
}
return f.Purge()
}
// Precision returns the precision
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
srcPath := srcObj.remotePath()
dstPath := dstObj.remotePath()
entry, err := f.db.Copy(srcPath, dstPath, false)
if err != nil {
return nil, fmt.Errorf("Copy failed: %s", err)
}
dstObj.setMetadataFromEntry(entry)
return dstObj, nil
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
// Let dropbox delete the filesystem tree
_, err := f.db.Delete(f.slashRoot)
return err
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
srcPath := srcObj.remotePath()
dstPath := dstObj.remotePath()
entry, err := f.db.Move(srcPath, dstPath)
if err != nil {
return nil, fmt.Errorf("Move failed: %s", err)
}
dstObj.setMetadataFromEntry(entry)
return dstObj, nil
}
// DirMove moves src to this remote using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debug(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
// Check if destination exists
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
if err == nil && !entry.IsDeleted {
return fs.ErrorDirExists
}
// Do the move
_, err = f.db.Move(srcFs.slashRoot, f.slashRoot)
if err != nil {
return fmt.Errorf("MoveDir failed: %v", err)
}
return nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
return "", nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.bytes
}
// setMetadataFromEntry sets the fs data from a dropbox.Entry
//
// This isn't a complete set of metadata and has an inaccurate date
func (o *Object) setMetadataFromEntry(info *dropbox.Entry) {
o.bytes = info.Bytes
o.modTime = time.Time(info.ClientMtime)
o.hasMetadata = true
}
// Reads the entry from dropbox
func (o *Object) readEntry() (*dropbox.Entry, error) {
entry, err := o.fs.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit)
if err != nil {
fs.Debug(o, "Error reading file: %s", err)
return nil, fmt.Errorf("Error reading file: %s", err)
}
return entry, nil
}
// Read entry if not set and set metadata from it
func (o *Object) readEntryAndSetMetadata() error {
// Last resort set time from client
if !o.modTime.IsZero() {
return nil
}
entry, err := o.readEntry()
if err != nil {
return err
}
o.setMetadataFromEntry(entry)
return nil
}
// Returns the remote path for the object
func (o *Object) remotePath() string {
return o.fs.slashRootSlash + o.remote
}
// Returns the key for the metadata database for a given path
func metadataKey(path string) string {
// NB File system is case insensitive
path = strings.ToLower(path)
hash := md5.New()
_, _ = hash.Write([]byte(path))
return fmt.Sprintf("%x", hash.Sum(nil))
}
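Because the path is lowercased before hashing, case variants of the same path collapse to one key. A standalone check (the paths are hypothetical):

package main

import (
	"crypto/md5"
	"fmt"
	"strings"
)

// key mirrors metadataKey above: lowercase the path, then md5 it.
func key(path string) string {
	sum := md5.Sum([]byte(strings.ToLower(path)))
	return fmt.Sprintf("%x", sum)
}

func main() {
	fmt.Println(key("Photos/IMG.jpg") == key("photos/img.jpg")) // true
}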
// Returns the key for the metadata database
func (o *Object) metadataKey() string {
return metadataKey(o.remotePath())
}
// readMetaData gets the info if it hasn't already been fetched
func (o *Object) readMetaData() (err error) {
if o.hasMetadata {
return nil
}
// Last resort
return o.readEntryAndSetMetadata()
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
return o.modTime
}
// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
func (o *Object) SetModTime(modTime time.Time) {
// FIXME not implemented
return
}
// Storable returns whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open() (in io.ReadCloser, err error) {
in, _, err = o.fs.db.Download(o.remotePath(), "", 0)
return
}
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
fs.ErrorLog(o, "File name disallowed - not uploading")
return nil
}
entry, err := o.fs.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")
if err != nil {
return fmt.Errorf("Upload failed: %s", err)
}
o.setMetadataFromEntry(entry)
return nil
}
// Remove an object
func (o *Object) Remove() error {
_, err := o.fs.db.Delete(o.remotePath())
return err
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

56
dropbox/dropbox_test.go Normal file

@@ -0,0 +1,56 @@
// Test Dropbox filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package dropbox_test
import (
"testing"
"github.com/ncw/rclone/dropbox"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func init() {
fstests.NilObject = fs.Object((*dropbox.Object)(nil))
fstests.RemoteName = "TestDropbox:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

179
dropbox/nametree.go Normal file

@@ -0,0 +1,179 @@
package dropbox
import (
"bytes"
"fmt"
"strings"
"github.com/ncw/rclone/fs"
"github.com/stacktic/dropbox"
)
type nameTreeNode struct {
// Map from lowercase directory name to tree node
Directories map[string]*nameTreeNode
// Map from file name (case sensitive) to dropbox entry
Files map[string]*dropbox.Entry
// Empty string if exact case is unknown or root node
CaseCorrectName string
}
// ------------------------------------------------------------
func newNameTreeNode(caseCorrectName string) *nameTreeNode {
return &nameTreeNode{
CaseCorrectName: caseCorrectName,
Directories: make(map[string]*nameTreeNode),
Files: make(map[string]*dropbox.Entry),
}
}
func newNameTree() *nameTreeNode {
return newNameTreeNode("")
}
func (tree *nameTreeNode) String() string {
if len(tree.CaseCorrectName) == 0 {
return "nameTreeNode/<root>"
}
return fmt.Sprintf("nameTreeNode/%q", tree.CaseCorrectName)
}
func (tree *nameTreeNode) getTreeNode(path string) *nameTreeNode {
if len(path) == 0 {
// no lookup required, just return root
return tree
}
current := tree
for _, component := range strings.Split(path, "/") {
if len(component) == 0 {
fs.Stats.Error()
fs.ErrorLog(tree, "getTreeNode: path component is empty (full path %q)", path)
return nil
}
lowercase := strings.ToLower(component)
lookup := current.Directories[lowercase]
if lookup == nil {
lookup = newNameTreeNode("")
current.Directories[lowercase] = lookup
}
current = lookup
}
return current
}
func (tree *nameTreeNode) PutCaseCorrectDirectoryName(parentPath string, caseCorrectDirectoryName string) {
if len(caseCorrectDirectoryName) == 0 {
fs.Stats.Error()
fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: empty caseCorrectDirectoryName is not allowed (parentPath: %q)", parentPath)
return
}
node := tree.getTreeNode(parentPath)
if node == nil {
return
}
lowerCaseDirectoryName := strings.ToLower(caseCorrectDirectoryName)
directory := node.Directories[lowerCaseDirectoryName]
if directory == nil {
directory = newNameTreeNode(caseCorrectDirectoryName)
node.Directories[lowerCaseDirectoryName] = directory
} else {
if len(directory.CaseCorrectName) > 0 {
fs.Stats.Error()
fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: directory %q is already exists under parent path %q", caseCorrectDirectoryName, parentPath)
return
}
directory.CaseCorrectName = caseCorrectDirectoryName
}
}
func (tree *nameTreeNode) PutFile(parentPath string, caseCorrectFileName string, dropboxEntry *dropbox.Entry) {
node := tree.getTreeNode(parentPath)
if node == nil {
return
}
if node.Files[caseCorrectFileName] != nil {
fs.Stats.Error()
fs.ErrorLog(tree, "PutFile: file %q is already exists at %q", caseCorrectFileName, parentPath)
return
}
node.Files[caseCorrectFileName] = dropboxEntry
}
func (tree *nameTreeNode) GetPathWithCorrectCase(path string) *string {
if path == "" {
empty := ""
return &empty
}
var result bytes.Buffer
current := tree
for _, component := range strings.Split(path, "/") {
if component == "" {
fs.Stats.Error()
fs.ErrorLog(tree, "GetPathWithCorrectCase: path component is empty (full path %q)", path)
return nil
}
lowercase := strings.ToLower(component)
current = current.Directories[lowercase]
if current == nil || current.CaseCorrectName == "" {
return nil
}
_, _ = result.WriteString("/")
_, _ = result.WriteString(current.CaseCorrectName)
}
resultString := result.String()
return &resultString
}
type nameTreeFileWalkFunc func(caseCorrectFilePath string, entry *dropbox.Entry)
func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFileWalkFunc) {
var prefix string
if currentPath == "" {
prefix = ""
} else {
prefix = currentPath + "/"
}
for name, entry := range tree.Files {
walkFunc(prefix+name, entry)
}
for lowerCaseName, directory := range tree.Directories {
caseCorrectName := directory.CaseCorrectName
if caseCorrectName == "" {
fs.Stats.Error()
fs.ErrorLog(tree, "WalkFiles: exact name of the directory %q is unknown (parent path: %q)", lowerCaseName, currentPath)
continue
}
directory.walkFilesRec(prefix+caseCorrectName, walkFunc)
}
}
func (tree *nameTreeNode) WalkFiles(rootPath string, walkFunc nameTreeFileWalkFunc) {
node := tree.getTreeNode(rootPath)
if node == nil {
return
}
node.walkFilesRec(rootPath, walkFunc)
}

124
dropbox/nametree_test.go Normal file

@@ -0,0 +1,124 @@
package dropbox
import (
"testing"
"github.com/ncw/rclone/fs"
dropboxapi "github.com/stacktic/dropbox"
)
func assert(t *testing.T, shouldBeTrue bool, failMessage string) {
if !shouldBeTrue {
t.Fatal(failMessage)
}
}
func TestPutCaseCorrectDirectoryName(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutCaseCorrectDirectoryName("a/b", "C")
assert(t, tree.CaseCorrectName == "", "Root CaseCorrectName should be empty")
a := tree.Directories["a"]
assert(t, a.CaseCorrectName == "", "CaseCorrectName at 'a' should be empty")
b := a.Directories["b"]
assert(t, b.CaseCorrectName == "", "CaseCorrectName at 'a/b' should be empty")
c := b.Directories["c"]
assert(t, c.CaseCorrectName == "C", "CaseCorrectName at 'a/b/c' should be 'C'")
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}
func TestPutCaseCorrectDirectoryNameEmptyComponent(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutCaseCorrectDirectoryName("/a", "C")
tree.PutCaseCorrectDirectoryName("b/", "C")
tree.PutCaseCorrectDirectoryName("a//b", "C")
assert(t, fs.Stats.GetErrors() == errors+3, "3 errors should be reported")
}
func TestPutCaseCorrectDirectoryNameEmptyParent(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutCaseCorrectDirectoryName("", "C")
c := tree.Directories["c"]
assert(t, c.CaseCorrectName == "C", "CaseCorrectName at 'c' should be 'C'")
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}
func TestGetPathWithCorrectCase(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutCaseCorrectDirectoryName("a", "C")
assert(t, tree.GetPathWithCorrectCase("a/c") == nil, "Path for 'a' should not be available")
tree.PutCaseCorrectDirectoryName("", "A")
assert(t, *tree.GetPathWithCorrectCase("a/c") == "/A/C", "Path for 'a/c' should be '/A/C'")
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}
func TestPutAndWalk(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
tree.PutCaseCorrectDirectoryName("", "A")
numCalled := 0
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
assert(t, entry.Path == "xxx", "entry.Path should be xxx")
numCalled++
}
tree.WalkFiles("", walkFunc)
assert(t, numCalled == 1, "walk func should be called only once")
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}
func TestPutAndWalkWithPrefix(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
tree.PutCaseCorrectDirectoryName("", "A")
numCalled := 0
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
assert(t, entry.Path == "xxx", "entry.Path should be xxx")
numCalled++
}
tree.WalkFiles("A", walkFunc)
assert(t, numCalled == 1, "walk func should be called only once")
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}
func TestPutAndWalkIncompleteTree(t *testing.T) {
errors := fs.Stats.GetErrors()
tree := newNameTree()
tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
t.Fatal("Should not be called")
}
tree.WalkFiles("", walkFunc)
assert(t, fs.Stats.GetErrors() == errors+1, "One error should be reported")
}

fs/accounting.go

@@ -7,51 +7,108 @@ import (
"fmt"
"io"
"log"
"sort"
"strings"
"sync"
"time"
"github.com/VividCortex/ewma"
"github.com/tsenart/tb"
)
// Globals
var (
Stats = NewStats()
tokenBucket *tb.Bucket
)
// Start the token bucket if necessary
func startTokenBucket() {
if bwLimit > 0 {
tokenBucket = tb.NewBucket(int64(bwLimit), 100*time.Millisecond)
Log(nil, "Starting bandwidth limiter at %vBytes/s", &bwLimit)
}
}
// stringSet holds a set of strings
type stringSet map[string]struct{}
// inProgress holds a synchronized map of in progress transfers
type inProgress struct {
mu sync.Mutex
m map[string]*Account
}
// newInProgress makes a new inProgress object
func newInProgress() *inProgress {
return &inProgress{
m: make(map[string]*Account, Config.Transfers),
}
}
// set marks the name as in progress
func (ip *inProgress) set(name string, acc *Account) {
ip.mu.Lock()
defer ip.mu.Unlock()
ip.m[name] = acc
}
// clear marks the name as no longer in progress
func (ip *inProgress) clear(name string) {
ip.mu.Lock()
defer ip.mu.Unlock()
delete(ip.m, name)
}
// get gets the account for name, or nil if not found
func (ip *inProgress) get(name string) *Account {
ip.mu.Lock()
defer ip.mu.Unlock()
return ip.m[name]
}
// Strings returns the stringSet as a sorted list, substituting in progress stats where available
func (ss stringSet) Strings() []string {
strings := make([]string, 0, len(ss))
for name := range ss {
var out string
if acc := Stats.inProgress.get(name); acc != nil {
out = acc.String()
} else {
out = name
}
strings = append(strings, " * "+out)
}
sorted := sort.StringSlice(strings)
sorted.Sort()
return sorted
}
// String returns all the file names in the stringSet joined by newline
func (ss stringSet) String() string {
return strings.Join(ss.Strings(), "\n")
}
// StatsInfo limits and accounts all transfers
type StatsInfo struct {
lock sync.RWMutex
bytes int64
errors int64
checks int64
checking stringSet
transfers int64
transferring stringSet
start time.Time
inProgress *inProgress
}
// NewStats creates an initialised StatsInfo
func NewStats() *StatsInfo {
return &StatsInfo{
checking: make(stringSet, Config.Checkers),
transferring: make(stringSet, Config.Transfers),
start: time.Now(),
inProgress: newInProgress(),
}
}
@@ -60,29 +117,30 @@ func (s *StatsInfo) String() string {
s.lock.RLock()
defer s.lock.RUnlock()
dt := time.Now().Sub(s.start)
dtSeconds := dt.Seconds()
speed := 0.0
if dt > 0 {
speed = float64(s.bytes) / 1024 / dtSeconds
}
dtRounded := dt - (dt % (time.Second / 10))
buf := &bytes.Buffer{}
fmt.Fprintf(buf, `
Transferred: %10d Bytes (%7.2f kByte/s)
Errors: %10d
Checks: %10d
Transferred: %10d
Elapsed time: %10v
`,
s.bytes, speed,
s.errors,
s.checks,
s.transfers,
dtRounded)
if len(s.checking) > 0 {
fmt.Fprintf(buf, "Checking: %s\n", s.checking)
fmt.Fprintf(buf, "Checking:\n%s\n", s.checking)
}
if len(s.transferring) > 0 {
fmt.Fprintf(buf, "Transferring: %s\n", s.transferring)
fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring)
}
return buf.String()
}
@@ -113,6 +171,23 @@ func (s *StatsInfo) GetErrors() int64 {
return s.errors
}
// ResetCounters sets the counters (bytes, checks, errors, transfers) to 0
func (s *StatsInfo) ResetCounters() {
s.lock.Lock()
defer s.lock.Unlock()
s.bytes = 0
s.errors = 0
s.checks = 0
s.transfers = 0
}
// ResetErrors sets the errors count to 0
func (s *StatsInfo) ResetErrors() {
s.lock.Lock()
defer s.lock.Unlock()
s.errors = 0
}
// Errored returns whether there have been any errors
func (s *StatsInfo) Errored() bool {
s.lock.RLock()
@@ -124,14 +199,14 @@ func (s *StatsInfo) Errored() bool {
func (s *StatsInfo) Error() {
s.lock.Lock()
defer s.lock.Unlock()
s.errors++
}
// Checking adds a check into the stats
func (s *StatsInfo) Checking(o Object) {
s.lock.Lock()
defer s.lock.Unlock()
s.checking[o.Remote()] = struct{}{}
}
// DoneChecking removes a check from the stats
@@ -139,14 +214,21 @@ func (s *StatsInfo) DoneChecking(o Object) {
s.lock.Lock()
defer s.lock.Unlock()
delete(s.checking, o.Remote())
s.checks++
}
// GetTransfers reads the number of transfers
func (s *StatsInfo) GetTransfers() int64 {
s.lock.RLock()
defer s.lock.RUnlock()
return s.transfers
}
// Transferring adds a transfer into the stats
func (s *StatsInfo) Transferring(o Object) {
s.lock.Lock()
defer s.lock.Unlock()
s.transferring[o.Remote()] = struct{}{}
}
// DoneTransferring removes a transfer from the stats
@@ -154,36 +236,187 @@ func (s *StatsInfo) DoneTransferring(o Object) {
s.lock.Lock()
defer s.lock.Unlock()
delete(s.transferring, o.Remote())
s.transfers++
}
// Account limits and accounts for one transfer
type Account struct {
// The mutex is to make sure Read() and Close() aren't called
// concurrently. Unfortunately the persistent connection loop
// in http transport calls Read() after Do() returns on
// CancelRequest so this race can happen when it apparently
// shouldn't.
mu sync.Mutex
in io.ReadCloser
size int64
name string
statmu sync.Mutex // Separate mutex for stat values.
bytes int64 // Total number of bytes read
start time.Time // Start time of first read
lpTime time.Time // Time of last average measurement
lpBytes int // Number of bytes read since last measurement
avg ewma.MovingAverage // Moving average of last few measurements
closed bool // set if the file is closed
exit chan struct{} // channel that will be closed when transfer is finished
}
// NewAccount makes a Account reader
func NewAccount(in io.ReadCloser) *Account {
return &Account{
in: in,
// NewAccount makes a Account reader for an object
func NewAccount(in io.ReadCloser, obj Object) *Account {
acc := &Account{
in: in,
size: obj.Size(),
name: obj.Remote(),
exit: make(chan struct{}),
avg: ewma.NewMovingAverage(),
lpTime: time.Now(),
}
go acc.averageLoop()
Stats.inProgress.set(acc.name, acc)
return acc
}
func (file *Account) averageLoop() {
tick := time.NewTicker(time.Second)
defer tick.Stop()
for {
select {
case now := <-tick.C:
file.statmu.Lock()
// Add average of last second.
elapsed := now.Sub(file.lpTime).Seconds()
avg := float64(file.lpBytes) / elapsed
file.avg.Add(avg)
file.lpBytes = 0
file.lpTime = now
// Unlock stats
file.statmu.Unlock()
case <-file.exit:
return
}
}
}
// Read bytes from the object - see io.Reader
func (file *Account) Read(p []byte) (n int, err error) {
file.mu.Lock()
defer file.mu.Unlock()
// Set start time.
file.statmu.Lock()
if file.start.IsZero() {
file.start = time.Now()
}
file.statmu.Unlock()
n, err = file.in.Read(p)
// Update Stats
file.statmu.Lock()
file.lpBytes += n
file.bytes += int64(n)
file.statmu.Unlock()
Stats.Bytes(int64(n))
// Limit the transfer speed if required
if tokenBucket != nil {
tokenBucket.Wait(int64(n))
}
return
}
// Progress returns bytes read as well as the size.
// Size can be <= 0 if the size is unknown.
func (file *Account) Progress() (bytes, size int64) {
if file == nil {
return 0, 0
}
file.statmu.Lock()
defer file.statmu.Unlock()
bytes, size = file.bytes, file.size
if bytes > size {
size = 0
}
return bytes, size
}
// Speed returns the speed of the current file transfer
// in bytes per second, as well as an exponentially weighted moving average.
// If no read has completed yet, 0 is returned for both values.
func (file *Account) Speed() (bps, current float64) {
if file == nil {
return 0, 0
}
file.statmu.Lock()
defer file.statmu.Unlock()
if file.bytes == 0 {
return 0, 0
}
// Calculate speed from first read.
total := float64(time.Now().Sub(file.start)) / float64(time.Second)
bps = float64(file.bytes) / total
current = file.avg.Value()
return
}
// ETA returns the ETA of the current operation,
// rounded to full seconds.
// If the ETA cannot be determined 'ok' returns false.
func (file *Account) ETA() (eta time.Duration, ok bool) {
if file == nil || file.size <= 0 {
return 0, false
}
file.statmu.Lock()
defer file.statmu.Unlock()
if file.bytes == 0 {
return 0, false
}
left := file.size - file.bytes
if left <= 0 {
return 0, true
}
avg := file.avg.Value()
if avg <= 0 {
return 0, false
}
seconds := float64(left) / avg
return time.Duration(time.Second * time.Duration(int(seconds))), true
}
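As a worked example of the arithmetic above (illustrative numbers): with 5 MiB left and a 1 MiB/s moving average, the ETA is five seconds.

package main

import (
	"fmt"
	"time"
)

func main() {
	left, avg := float64(5*1024*1024), float64(1024*1024) // 5 MiB left at 1 MiB/s
	seconds := left / avg
	fmt.Println(time.Duration(int(seconds)) * time.Second) // 5s
}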
// String produces stats for this file
func (file *Account) String() string {
a, b := file.Progress()
avg, cur := file.Speed()
eta, etaok := file.ETA()
etas := "-"
if etaok {
if eta > 0 {
etas = fmt.Sprintf("%v", eta)
} else {
etas = "0s"
}
}
name := []rune(file.name)
if len(name) > 45 {
where := len(name) - 42
name = append([]rune{'.', '.', '.'}, name[where:]...)
}
if b <= 0 {
return fmt.Sprintf("%45s: avg:%7.1f, cur: %6.1f kByte/s. ETA: %s", string(name), avg/1024, cur/1024, etas)
}
return fmt.Sprintf("%45s: %2d%% done. avg: %6.1f, cur: %6.1f kByte/s. ETA: %s", string(name), int(100*float64(a)/float64(b)), avg/1024, cur/1024, etas)
}
// Close the object
func (file *Account) Close() error {
// FIXME do something?
file.mu.Lock()
defer file.mu.Unlock()
if file.closed {
return nil
}
file.closed = true
close(file.exit)
Stats.inProgress.clear(file.name)
return file.in.Close()
}
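Putting the Account pieces together: a transfer wraps the source reader so every Read is counted, bandwidth limited, and visible in the in progress stats. A minimal sketch (the copyObject helper and dst writer are assumptions, not part of the package):

package example

import (
	"io"

	"github.com/ncw/rclone/fs"
)

// copyObject copies o to dst while accounting the transfer.
func copyObject(dst io.Writer, o fs.Object) error {
	in, err := o.Open()
	if err != nil {
		return err
	}
	acc := fs.NewAccount(in, o) // registers with Stats.inProgress
	defer acc.Close()           // deregisters and closes the source
	_, err = io.Copy(dst, acc)  // reads are counted and bandwidth limited
	return err
}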

199
fs/buffer.go Normal file

@@ -0,0 +1,199 @@
package fs
import (
"fmt"
"io"
)
// asyncReader will do async read-ahead from the input reader
// and make the data available as an io.Reader.
// This should be fully transparent, except that once an error
// has been returned from the Reader, it will not recover.
type asyncReader struct {
in io.ReadCloser // Input reader
ready chan *buffer // Buffers ready to be handed to the reader
reuse chan *buffer // Buffers to reuse for input reading
exit chan struct{} // Closes when finished
buffers int // Number of buffers
err error // If an error has occurred it is here
cur *buffer // Current buffer being served
exited chan struct{} // Channel is closed when the async reader shuts down
closed bool // Has the parent reader been closed?
}
// newAsyncReader returns a reader that will asynchronously read from
// the supplied Reader into a number of buffers each with a given size.
// It will start reading from the input at once, maybe even before this
// function has returned.
// The input can be read from the returned reader.
// When done use Close to release the buffers and close the supplied input.
func newAsyncReader(rd io.ReadCloser, buffers, size int) (io.ReadCloser, error) {
if size <= 0 {
return nil, fmt.Errorf("buffer size too small")
}
if buffers <= 0 {
return nil, fmt.Errorf("number of buffers too small")
}
if rd == nil {
return nil, fmt.Errorf("nil reader supplied")
}
a := &asyncReader{}
a.init(rd, buffers, size)
return a, nil
}
func (a *asyncReader) init(rd io.ReadCloser, buffers, size int) {
a.in = rd
a.ready = make(chan *buffer, buffers)
a.reuse = make(chan *buffer, buffers)
a.exit = make(chan struct{})
a.exited = make(chan struct{})
a.buffers = buffers
a.cur = nil
// Create buffers
for i := 0; i < buffers; i++ {
a.reuse <- newBuffer(size)
}
// Start async reader
go func() {
// Ensure that when we exit this is signalled.
defer close(a.exited)
for {
select {
case b := <-a.reuse:
err := b.read(a.in)
a.ready <- b
if err != nil {
close(a.ready)
return
}
case <-a.exit:
return
}
}
}()
}
// fill swaps in the next ready buffer, recycling the current one. It
// returns the stored error once no more buffers are ready.
func (a *asyncReader) fill() (err error) {
if a.cur.isEmpty() {
if a.cur != nil {
a.reuse <- a.cur
a.cur = nil
}
b, ok := <-a.ready
if !ok {
return a.err
}
a.cur = b
}
return nil
}
// Read will return the next available data.
func (a *asyncReader) Read(p []byte) (n int, err error) {
// Swap buffer and maybe return error
err = a.fill()
if err != nil {
return 0, err
}
// Copy what we can
n = copy(p, a.cur.buffer())
a.cur.increment(n)
// If at end of buffer, return any error, if present
if a.cur.isEmpty() {
a.err = a.cur.err
return n, a.err
}
return n, nil
}
// WriteTo writes data to w until there's no more data to write or when an error occurs.
// The return value n is the number of bytes written.
// Any error encountered during the write is also returned.
func (a *asyncReader) WriteTo(w io.Writer) (n int64, err error) {
n = 0
for {
err = a.fill()
if err != nil {
return n, err
}
n2, err := w.Write(a.cur.buffer())
a.cur.increment(n2)
n += int64(n2)
if err != nil {
return n, err
}
if a.cur.err != nil {
a.err = a.cur.err
return n, a.cur.err
}
}
}
// Close will ensure that the underlying async reader is shut down.
// It will also close the input supplied on newAsyncReader.
func (a *asyncReader) Close() (err error) {
select {
case <-a.exited:
default:
close(a.exit)
<-a.exited
}
if !a.closed {
a.closed = true
return a.in.Close()
}
return nil
}
// Internal buffer
// If an error is present, it must be returned
// once all buffer content has been served.
type buffer struct {
buf []byte
err error
offset int
size int
}
func newBuffer(size int) *buffer {
return &buffer{buf: make([]byte, size), err: nil, size: size}
}
// isEmpty returns true if the buffer is nil or the offset is at the
// end of the buffer.
func (b *buffer) isEmpty() bool {
if b == nil {
return true
}
if len(b.buf)-b.offset <= 0 {
return true
}
return false
}
// read into start of the buffer from the supplied reader,
// resets the offset and updates the size of the buffer.
// Any error encountered during the read is returned.
func (b *buffer) read(rd io.Reader) error {
var n int
n, b.err = rd.Read(b.buf[0:b.size])
b.buf = b.buf[0:n]
b.offset = 0
return b.err
}
// Return the buffer at current offset
func (b *buffer) buffer() []byte {
return b.buf[b.offset:]
}
// increment the offset
func (b *buffer) increment(n int) {
b.offset += n
}
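Within the package, wiring the read-ahead in front of a slow source is a one-liner. A sketch (it must live inside package fs since newAsyncReader is unexported; 4 buffers of 1 MiB each are illustrative numbers, not tuned defaults):

// openBuffered wraps src with 4 read-ahead buffers of 1 MiB each; Close
// on the returned reader also closes src.
func openBuffered(src io.ReadCloser) (io.ReadCloser, error) {
	return newAsyncReader(src, 4, 1024*1024)
}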

268
fs/buffer_test.go Normal file

@@ -0,0 +1,268 @@
package fs
import (
"bufio"
"bytes"
"io"
"io/ioutil"
"strings"
"testing"
"testing/iotest"
)
func TestAsyncReader(t *testing.T) {
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
ar, err := newAsyncReader(buf, 4, 10000)
if err != nil {
t.Fatal("error when creating:", err)
}
var dst = make([]byte, 100)
n, err := ar.Read(dst)
if err != nil {
t.Fatal("error when reading:", err)
}
if n != 10 {
t.Fatal("unexpected length, expected 10, got ", n)
}
n, err = ar.Read(dst)
if err != io.EOF {
t.Fatal("expected io.EOF, got", err)
}
if n != 0 {
t.Fatal("unexpected length, expected 0, got ", n)
}
// Test read after error
n, err = ar.Read(dst)
if err != io.EOF {
t.Fatal("expected io.EOF, got", err)
}
if n != 0 {
t.Fatal("unexpected length, expected 0, got ", n)
}
err = ar.Close()
if err != nil {
t.Fatal("error when closing:", err)
}
// Test double close
err = ar.Close()
if err != nil {
t.Fatal("error when closing:", err)
}
// Test Close without reading everything
buf = ioutil.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
ar, err = newAsyncReader(buf, 4, 100)
if err != nil {
t.Fatal("error when creating:", err)
}
err = ar.Close()
if err != nil {
t.Fatal("error when closing, noread:", err)
}
}
func TestAsyncWriteTo(t *testing.T) {
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
ar, err := newAsyncReader(buf, 4, 10000)
if err != nil {
t.Fatal("error when creating:", err)
}
var dst = &bytes.Buffer{}
n, err := io.Copy(dst, ar)
if err != io.EOF {
t.Fatal("error when reading:", err)
}
if n != 10 {
t.Fatal("unexpected length, expected 10, got ", n)
}
// Should still return EOF
n, err = io.Copy(dst, ar)
if err != io.EOF {
t.Fatal("expected io.EOF, got", err)
}
if n != 0 {
t.Fatal("unexpected length, expected 0, got ", n)
}
err = ar.Close()
if err != nil {
t.Fatal("error when closing:", err)
}
}
func TestAsyncReaderErrors(t *testing.T) {
// test nil reader
_, err := newAsyncReader(nil, 4, 10000)
if err == nil {
t.Fatal("expected error when creating, but got nil")
}
// invalid buffer number
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
_, err = newAsyncReader(buf, 0, 10000)
if err == nil {
t.Fatal("expected error when creating, but got nil")
}
_, err = newAsyncReader(buf, -1, 10000)
if err == nil {
t.Fatal("expected error when creating, but got nil")
}
// invalid buffer size
_, err = newAsyncReader(buf, 4, 0)
if err == nil {
t.Fatal("expected error when creating, but got nil")
}
_, err = newAsyncReader(buf, 4, -1)
if err == nil {
t.Fatal("expected error when creating, but got nil")
}
}
// Complex read tests, leveraged from "bufio".
type readMaker struct {
name string
fn func(io.Reader) io.Reader
}
var readMakers = []readMaker{
{"full", func(r io.Reader) io.Reader { return r }},
{"byte", iotest.OneByteReader},
{"half", iotest.HalfReader},
{"data+err", iotest.DataErrReader},
{"timeout", iotest.TimeoutReader},
}
// Call Read to accumulate the text of a file
func reads(buf io.Reader, m int) string {
var b [1000]byte
nb := 0
for {
n, err := buf.Read(b[nb : nb+m])
nb += n
if err == io.EOF {
break
} else if err != nil && err != iotest.ErrTimeout {
panic("Data: " + err.Error())
} else if err != nil {
break
}
}
return string(b[0:nb])
}
type bufReader struct {
name string
fn func(io.Reader) string
}
var bufreaders = []bufReader{
{"1", func(b io.Reader) string { return reads(b, 1) }},
{"2", func(b io.Reader) string { return reads(b, 2) }},
{"3", func(b io.Reader) string { return reads(b, 3) }},
{"4", func(b io.Reader) string { return reads(b, 4) }},
{"5", func(b io.Reader) string { return reads(b, 5) }},
{"7", func(b io.Reader) string { return reads(b, 7) }},
}
const minReadBufferSize = 16
var bufsizes = []int{
0, minReadBufferSize, 23, 32, 46, 64, 93, 128, 1024, 4096,
}
// Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderSizes(t *testing.T) {
var texts [31]string
str := ""
all := ""
for i := 0; i < len(texts)-1; i++ {
texts[i] = str + "\n"
all += texts[i]
str += string(i%26 + 'a')
}
texts[len(texts)-1] = all
for h := 0; h < len(texts); h++ {
text := texts[h]
for i := 0; i < len(readMakers); i++ {
for j := 0; j < len(bufreaders); j++ {
for k := 0; k < len(bufsizes); k++ {
for l := 1; l < 10; l++ {
readmaker := readMakers[i]
bufreader := bufreaders[j]
bufsize := bufsizes[k]
read := readmaker.fn(strings.NewReader(text))
buf := bufio.NewReaderSize(read, bufsize)
ar, _ := newAsyncReader(ioutil.NopCloser(buf), l, 100)
s := bufreader.fn(ar)
// "timeout" expects the Reader to recover, asyncReader does not.
if s != text && readmaker.name != "timeout" {
t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
readmaker.name, bufreader.name, bufsize, text, s)
}
err := ar.Close()
if err != nil {
t.Fatal("Unexpected close error:", err)
}
}
}
}
}
}
}
// Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderWriteTo(t *testing.T) {
var texts [31]string
str := ""
all := ""
for i := 0; i < len(texts)-1; i++ {
texts[i] = str + "\n"
all += texts[i]
str += string(i%26 + 'a')
}
texts[len(texts)-1] = all
for h := 0; h < len(texts); h++ {
text := texts[h]
for i := 0; i < len(readMakers); i++ {
for j := 0; j < len(bufreaders); j++ {
for k := 0; k < len(bufsizes); k++ {
for l := 1; l < 10; l++ {
readmaker := readMakers[i]
bufreader := bufreaders[j]
bufsize := bufsizes[k]
read := readmaker.fn(strings.NewReader(text))
buf := bufio.NewReaderSize(read, bufsize)
ar, _ := newAsyncReader(ioutil.NopCloser(buf), l, 100)
dst := &bytes.Buffer{}
wt := ar.(io.WriterTo)
_, err := wt.WriteTo(dst)
if err != nil && err != io.EOF && err != iotest.ErrTimeout {
t.Fatal("Copy:", err)
}
s := dst.String()
// "timeout" expects the Reader to recover, asyncReader does not.
if s != text && readmaker.name != "timeout" {
t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
readmaker.name, bufreader.name, bufsize, text, s)
}
err = ar.Close()
if err != nil {
t.Fatal("Unexpected close error:", err)
}
}
}
}
}
}
}

fs/config.go

@@ -4,8 +4,11 @@ package fs
import (
"bufio"
"encoding/base64"
"fmt"
"log"
"math"
"net/http"
"os"
"os/user"
"path"
@@ -14,66 +17,241 @@ import (
"strings"
"time"
"crypto/tls"
"github.com/Unknwon/goconfig"
"github.com/ogier/pflag"
"github.com/mreiferson/go-httpclient"
"github.com/spf13/pflag"
)
const (
configFileName = ".rclone.conf"
)
// SizeSuffix is parsed by flag with k/M/G suffixes
type SizeSuffix int64
// Global
var (
// ConfigFile is the config file data structure
ConfigFile *goconfig.ConfigFile
// HomeDir is the home directory of the user
HomeDir = configHome()
// ConfigPath points to the config file
ConfigPath = path.Join(HomeDir, configFileName)
// Config is the global config
Config = &ConfigInfo{}
// Flags
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
checkSum = pflag.BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
sizeOnly = pflag.BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
timeout = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
dumpHeaders = pflag.BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
dumpBodies = pflag.BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
skipVerify = pflag.BoolP("no-check-certificate", "", false, "Do not verify the server SSL certificate. Insecure.")
bwLimit SizeSuffix
)
// Filesystem config options
func init() {
pflag.VarP(&bwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix k|M|G")
}
// Turn SizeSuffix into a string
func (x SizeSuffix) String() string {
scaled := float64(0)
suffix := ""
switch {
case x == 0:
return "0"
case x < 1024*1024:
scaled = float64(x) / 1024
suffix = "k"
case x < 1024*1024*1024:
scaled = float64(x) / 1024 / 1024
suffix = "M"
default:
scaled = float64(x) / 1024 / 1024 / 1024
suffix = "G"
}
if math.Floor(scaled) == scaled {
return fmt.Sprintf("%.0f%s", scaled, suffix)
}
return fmt.Sprintf("%.3f%s", scaled, suffix)
}
// Set a SizeSuffix
func (x *SizeSuffix) Set(s string) error {
if len(s) == 0 {
return fmt.Errorf("Empty string")
}
suffix := s[len(s)-1]
suffixLen := 1
var multiplier float64
switch suffix {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
suffixLen = 0
multiplier = 1 << 10
case 'k', 'K':
multiplier = 1 << 10
case 'm', 'M':
multiplier = 1 << 20
case 'g', 'G':
multiplier = 1 << 30
default:
return fmt.Errorf("Bad suffix %q", suffix)
}
s = s[:len(s)-suffixLen]
value, err := strconv.ParseFloat(s, 64)
if err != nil {
return err
}
if value < 0 {
return fmt.Errorf("Size can't be negative %q", s)
}
value *= multiplier
*x = SizeSuffix(value)
return nil
}
// Type of the value
func (x *SizeSuffix) Type() string {
return "int64"
}
// Check it satisfies the interface
var _ pflag.Value = (*SizeSuffix)(nil)
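Since SizeSuffix satisfies pflag.Value it can back flags like --bwlimit, --min-size and --max-size directly. A small sketch of the parse/format round trip, written as if it were an example test in package fs:

package fs

import "fmt"

// Illustrative sketch of Set and String as defined above.
func ExampleSizeSuffix() {
	var ss SizeSuffix
	for _, in := range []string{"100", "10k", "1.5M", "2G"} {
		if err := ss.Set(in); err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Println(in, "->", int64(ss), "bytes =", ss.String())
	}
	// Output:
	// 100 -> 102400 bytes = 100k
	// 10k -> 10240 bytes = 10k
	// 1.5M -> 1572864 bytes = 1.500M
	// 2G -> 2147483648 bytes = 2G
}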
// Obscure a config value
func Obscure(x string) string {
y := []byte(x)
for i := range y {
y[i] ^= byte(i) ^ 0xAA
}
return base64.StdEncoding.EncodeToString(y)
}
// Reveal a config value
func Reveal(y string) string {
x, err := base64.StdEncoding.DecodeString(y)
if err != nil {
log.Fatalf("Failed to reveal %q: %v", y, err)
}
for i := range x {
x[i] ^= byte(i) ^ 0xAA
}
return string(x)
}
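Obscure and Reveal are inverse transforms (a position-dependent XOR plus base64), intended to keep credentials out of plain sight in the config file rather than to encrypt them. A sketch of the round trip, consistent with the test table in fs/config_test.go below:

package fs

import "fmt"

// Illustrative sketch: Reveal undoes Obscure exactly.
func ExampleObscure() {
	secret := "potato"
	obscured := Obscure(secret)
	fmt.Println(obscured, Reveal(obscured) == secret)
	// Output: 2sTcyNrA true
}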
// ConfigInfo is filesystem config options
type ConfigInfo struct {
Verbose bool
Quiet bool
DryRun bool
ModifyWindow time.Duration
Checkers int
Transfers int
Verbose bool
Quiet bool
DryRun bool
CheckSum bool
SizeOnly bool
ModifyWindow time.Duration
Checkers int
Transfers int
ConnectTimeout time.Duration // Connect timeout
Timeout time.Duration // Data channel timeout
DumpHeaders bool
DumpBodies bool
Filter *Filter
InsecureSkipVerify bool // Skip server certificate verification
}
// Transport returns an http.RoundTripper with the correct timeouts
func (ci *ConfigInfo) Transport() http.RoundTripper {
t := &httpclient.Transport{
Proxy: http.ProxyFromEnvironment,
MaxIdleConnsPerHost: ci.Checkers + ci.Transfers + 1,
// ConnectTimeout, if non-zero, is the maximum amount of time a dial will wait for
// a connect to complete.
ConnectTimeout: ci.ConnectTimeout,
// ResponseHeaderTimeout, if non-zero, specifies the amount of
// time to wait for a server's response headers after fully
// writing the request (including its body, if any). This
// time does not include the time to read the response body.
ResponseHeaderTimeout: ci.Timeout,
// RequestTimeout, if non-zero, specifies the amount of time for the entire
// request to complete (including all of the above timeouts + entire response body).
// This should never be less than the sum total of the above two timeouts.
//RequestTimeout: NOT SET,
// ReadWriteTimeout, if non-zero, will set a deadline for every Read and
// Write operation on the request connection.
ReadWriteTimeout: ci.Timeout,
// InsecureSkipVerify controls whether a client verifies the
// server's certificate chain and host name.
// If InsecureSkipVerify is true, TLS accepts any certificate
// presented by the server and any host name in that certificate.
// In this mode, TLS is susceptible to man-in-the-middle attacks.
// This should be used only for testing.
TLSClientConfig: &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify},
}
if ci.DumpHeaders || ci.DumpBodies {
return NewLoggedTransport(t, ci.DumpBodies)
}
return t
}
// Client returns an http.Client with the correct timeouts
func (ci *ConfigInfo) Client() *http.Client {
return &http.Client{
Transport: ci.Transport(),
}
}
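Remotes are expected to fetch their HTTP client from the global config so the timeout, TLS and dump settings apply everywhere. A hedged sketch of typical use (the URL is only an example):

package fs

import "fmt"

// Illustrative sketch: make a request with the configured timeouts and,
// if -dump-headers/-dump-bodies are set, full traffic logging.
func exampleClient() {
	resp, err := Config.Client().Get("https://example.com/")
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	defer func() { _ = resp.Body.Close() }()
	fmt.Println("status:", resp.Status)
}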
// Find the config directory
func configHome() string {
// Find users home directory
usr, err := user.Current()
if err != nil {
log.Printf("Couldn't find home directory: %v", err)
return ""
if err == nil {
return usr.HomeDir
}
return usr.HomeDir
// Fall back to reading $HOME - work around user.Current() not
// working for cross compiled binaries on OSX.
// https://github.com/golang/go/issues/6376
home := os.Getenv("HOME")
if home != "" {
return home
}
log.Printf("Couldn't find home directory or read HOME environment variable.")
log.Printf("Defaulting to storing config in current directory.")
log.Printf("Use -config flag to workaround.")
log.Printf("Error was: %v", err)
return ""
}
// Loads the config file
// LoadConfig loads the config file
func LoadConfig() {
// Read some flags if set
//
// FIXME read these from the config file too
Config.Verbose = *verbose
Config.Quiet = *quiet
Config.Quiet = *dryRun
Config.ModifyWindow = *modifyWindow
Config.Checkers = *checkers
Config.Transfers = *transfers
Config.DryRun = *dryRun
Config.Timeout = *timeout
Config.ConnectTimeout = *connectTimeout
Config.CheckSum = *checkSum
Config.SizeOnly = *sizeOnly
Config.DumpHeaders = *dumpHeaders
Config.DumpBodies = *dumpBodies
Config.InsecureSkipVerify = *skipVerify
ConfigPath = *configFile
@@ -81,15 +259,24 @@ func LoadConfig() {
var err error
ConfigFile, err = goconfig.LoadConfigFile(ConfigPath)
if err != nil {
log.Printf("Failed to load config file %v - using defaults", ConfigPath)
log.Printf("Failed to load config file %v - using defaults: %v", ConfigPath, err)
ConfigFile, err = goconfig.LoadConfigFile(os.DevNull)
if err != nil {
log.Fatalf("Failed to read null config file: %v", err)
}
}
// Load filters
Config.Filter, err = NewFilter()
if err != nil {
log.Fatalf("Failed to load filters: %v", err)
}
// Start the token bucket limiter
startTokenBucket()
}
// Save configuration file.
// SaveConfig saves configuration file.
func SaveConfig() {
err := goconfig.SaveConfigFile(ConfigFile, ConfigPath)
if err != nil {
@@ -101,7 +288,7 @@ func SaveConfig() {
}
}
// Show an overview of the config file
// ShowRemotes shows an overview of the config file
func ShowRemotes() {
remotes := ConfigFile.GetSectionList()
if len(remotes) == 0 {
@@ -122,7 +309,7 @@ func ChooseRemote() string {
return Choose("remote", remotes, nil, false)
}
// Read some input
// ReadLine reads some input
func ReadLine() string {
buf := bufio.NewReader(os.Stdin)
line, err := buf.ReadString('\n')
@@ -147,14 +334,14 @@ func Command(commands []string) byte {
if len(result) != 1 {
continue
}
i := strings.IndexByte(optString, result[0])
i := strings.Index(optString, string(result[0]))
if i >= 0 {
return result[0]
}
}
}
// Asks the user for Yes or No and returns true or false
// Confirm asks the user for Yes or No and returns true or false
func Confirm() bool {
return Command([]string{"yYes", "nNo"}) == 'y'
}
@@ -191,7 +378,7 @@ func Choose(what string, defaults, help []string, newOk bool) string {
}
}
// Show the contents of the remote
// ShowRemote shows the contents of the remote
func ShowRemote(name string) {
fmt.Printf("--------------------\n")
fmt.Printf("[%s]\n", name)
@@ -201,7 +388,7 @@ func ShowRemote(name string) {
fmt.Printf("--------------------\n")
}
// Print the contents of the remote and ask if it is OK
// OkRemote prints the contents of the remote and asks if it is OK
func OkRemote(name string) bool {
ShowRemote(name)
switch i := Command([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}); i {
@@ -218,7 +405,7 @@ func OkRemote(name string) bool {
return false
}
// Runs the config helper for the remote if needed
// RemoteConfig runs the config helper for the remote if needed
func RemoteConfig(name string) {
fmt.Printf("Remote config\n")
fsName := ConfigFile.MustValue(name, "type")
@@ -234,7 +421,7 @@ func RemoteConfig(name string) {
}
}
// Choose an option
// ChooseOption asks the user to choose an option
func ChooseOption(o *Option) string {
fmt.Println(o.Help)
if len(o.Examples) > 0 {
@@ -250,7 +437,7 @@ func ChooseOption(o *Option) string {
return ReadLine()
}
// Make a new remote
// NewRemote makes a new remote from its name
func NewRemote(name string) {
fmt.Printf("What type of source is it?\n")
types := []string{}
@@ -274,7 +461,7 @@ func NewRemote(name string) {
EditRemote(name)
}
// Edit a remote
// EditRemote gets the user to edit a remote
func EditRemote(name string) {
ShowRemote(name)
fmt.Printf("Edit remote\n")
@@ -296,13 +483,13 @@ func EditRemote(name string) {
SaveConfig()
}
// Delete a remote
// DeleteRemote gets the user to delete a remote
func DeleteRemote(name string) {
ConfigFile.DeleteSection(name)
SaveConfig()
}
// Edit the config file interactively
// EditConfig edits the config file interactively
func EditConfig() {
for {
haveRemotes := len(ConfigFile.GetSectionList()) != 0
@@ -320,9 +507,23 @@ func EditConfig() {
name := ChooseRemote()
EditRemote(name)
case 'n':
fmt.Printf("name> ")
name := ReadLine()
NewRemote(name)
nameLoop:
for {
fmt.Printf("name> ")
name := ReadLine()
parts := matcher.FindStringSubmatch(name + ":")
switch {
case name == "":
fmt.Printf("Can't use empty name\n")
case isDriveLetter(name):
fmt.Printf("Can't use %q as it can be confused a drive letter\n", name)
case len(parts) != 3 || parts[2] != "":
fmt.Printf("Can't use %q as it has invalid characters in it %v\n", name, parts)
default:
NewRemote(name)
break nameLoop
}
}
case 'd':
name := ChooseRemote()
DeleteRemote(name)

fs/config_test.go Normal file (75 lines)

@@ -0,0 +1,75 @@
package fs
import "testing"
func TestSizeSuffixString(t *testing.T) {
for _, test := range []struct {
in float64
want string
}{
{0, "0"},
{102, "0.100k"},
{1024, "1k"},
{1024 * 1024, "1M"},
{1024 * 1024 * 1024, "1G"},
{10 * 1024 * 1024 * 1024, "10G"},
{10.1 * 1024 * 1024 * 1024, "10.100G"},
} {
ss := SizeSuffix(test.in)
got := ss.String()
if test.want != got {
t.Errorf("Want %v got %v", test.want, got)
}
}
}
func TestSizeSuffixSet(t *testing.T) {
for i, test := range []struct {
in string
want int64
err bool
}{
{"0", 0, false},
{"0.1k", 102, false},
{"0.1", 102, false},
{"1K", 1024, false},
{"1", 1024, false},
{"2.5", 1024 * 2.5, false},
{"1M", 1024 * 1024, false},
{"1.g", 1024 * 1024 * 1024, false},
{"10G", 10 * 1024 * 1024 * 1024, false},
{"", 0, true},
{"1p", 0, true},
{"1.p", 0, true},
{"1p", 0, true},
{"-1K", 0, true},
} {
ss := SizeSuffix(0)
err := ss.Set(test.in)
if (err != nil) != test.err {
t.Errorf("%d: Expecting error %v but got error %v", i, test.err, err)
}
got := int64(ss)
if test.want != got {
t.Errorf("%d: Want %v got %v", i, test.want, got)
}
}
}
func TestReveal(t *testing.T) {
for _, test := range []struct {
in string
want string
}{
{"", ""},
{"2sTcyNrA", "potato"},
} {
got := Reveal(test.in)
if got != test.want {
t.Errorf("%q: want %q got %q", test.in, test.want, got)
}
if Obscure(got) != test.in {
t.Errorf("%q: wasn't bidirectional", test.in)
}
}
}

fs/driveletter.go Normal file (12 lines)

@@ -0,0 +1,12 @@
// +build !windows
package fs
// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
//
// On non windows platforms we don't have drive letters so we always
// return false
func isDriveLetter(name string) bool {
return false
}

fs/driveletter_windows.go Normal file (13 lines)

@@ -0,0 +1,13 @@
// +build windows
package fs
// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
func isDriveLetter(name string) bool {
if len(name) != 1 {
return false
}
c := name[0]
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
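The two build-tagged variants give NewFs a portable check, so on Windows a path like "c:\backup" keeps meaning drive C rather than a remote named "c". A quick sketch of the behaviour:

package fs

import "fmt"

// Illustrative sketch: single letters are (potential) drive letters on
// windows builds only; longer names never are.
func exampleDriveLetter() {
	fmt.Println(isDriveLetter("c"))      // true on windows builds, false elsewhere
	fmt.Println(isDriveLetter("remote")) // false everywhere
}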

fs/error.go Normal file (102 lines)

@@ -0,0 +1,102 @@
// Errors and error handling
package fs
import (
"fmt"
"net/http"
"net/url"
)
// Retry is an optional interface for error as to whether the
// operation should be retried at a high level.
//
// This should be returned from Update or Put methods as required
type Retry interface {
error
Retry() bool
}
// retryError is a type of error
type retryError string
// Error interface
func (r retryError) Error() string {
return string(r)
}
// Retry interface
func (r retryError) Retry() bool {
return true
}
// Check interface
var _ Retry = retryError("")
// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...interface{}) error {
return retryError(fmt.Sprintf(format, a...))
}
// plainRetryError is an error wrapped so it will retry
type plainRetryError struct {
error
}
// Retry interface
func (err plainRetryError) Retry() bool {
return true
}
// Check interface
var _ Retry = plainRetryError{(error)(nil)}
// RetryError makes an error which indicates it would like to be retried
func RetryError(err error) error {
return plainRetryError{err}
}
// ShouldRetry looks at an error and tries to work out if retrying the
// operation that caused it would be a good idea. It returns true if
// the error implements Timeout() or Temporary() and that method returns true.
func ShouldRetry(err error) bool {
if err == nil {
return false
}
// Unwrap url.Error
if urlErr, ok := err.(*url.Error); ok {
err = urlErr.Err
}
// Check for net error Timeout()
if x, ok := err.(interface {
Timeout() bool
}); ok && x.Timeout() {
return true
}
// Check for net error Temporary()
if x, ok := err.(interface {
Temporary() bool
}); ok && x.Temporary() {
return true
}
return false
}
// ShouldRetryHTTP returns a boolean as to whether this resp deserves to be retried.
// It checks to see if the HTTP response code is in the slice
// retryErrorCodes.
func ShouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
if resp == nil {
return false
}
for _, e := range retryErrorCodes {
if resp.StatusCode == e {
return true
}
}
return false
}
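A remote's Put or Update can translate transient failures into Retry errors so the retry loop in Copy tries again. A hedged sketch (the status-code list here is an arbitrary example, not a fixed API):

package fs

import "net/http"

// Illustrative sketch: classify an HTTP failure as retryable.
func exampleRetry(resp *http.Response, err error) error {
	retryErrorCodes := []int{429, 500, 502, 503, 504} // example list only
	if ShouldRetry(err) || ShouldRetryHTTP(resp, retryErrorCodes) {
		return RetryErrorf("transient failure: %v", err)
	}
	return err
}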

fs/filter.go Normal file (248 lines)

@@ -0,0 +1,248 @@
// Control the filtering of files
package fs
import (
"bufio"
"fmt"
"os"
"regexp"
"strings"
"github.com/spf13/pflag"
)
// Global
var (
// Flags
deleteExcluded = pflag.BoolP("delete-excluded", "", false, "Delete files on dest excluded from sync")
filterRule = pflag.StringP("filter", "f", "", "Add a file-filtering rule")
filterFrom = pflag.StringP("filter-from", "", "", "Read filtering patterns from a file")
excludeRule = pflag.StringP("exclude", "", "", "Exclude files matching pattern")
excludeFrom = pflag.StringP("exclude-from", "", "", "Read exclude patterns from file")
includeRule = pflag.StringP("include", "", "", "Include files matching pattern")
includeFrom = pflag.StringP("include-from", "", "", "Read include patterns from file")
filesFrom = pflag.StringP("files-from", "", "", "Read list of source-file names from file")
minSize SizeSuffix
maxSize SizeSuffix
dumpFilters = pflag.BoolP("dump-filters", "", false, "Dump the filters to the output")
//cvsExclude = pflag.BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
)
func init() {
pflag.VarP(&minSize, "min-size", "", "Don't transfer any file smaller than this in k or suffix k|M|G")
pflag.VarP(&maxSize, "max-size", "", "Don't transfer any file larger than this in k or suffix k|M|G")
}
// rule is one filter rule
type rule struct {
Include bool
Regexp *regexp.Regexp
}
// Match returns true if rule matches path
func (r *rule) Match(path string) bool {
return r.Regexp.MatchString(path)
}
// String the rule
func (r *rule) String() string {
c := "-"
if r.Include {
c = "+"
}
return fmt.Sprintf("%s %s", c, r.Regexp.String())
}
// filesMap describes the map of files to transfer
type filesMap map[string]struct{}
// Filter describes any filtering in operation
type Filter struct {
DeleteExcluded bool
MinSize int64
MaxSize int64
rules []rule
files filesMap
}
// NewFilter parses the command line options and creates a Filter object
func NewFilter() (f *Filter, err error) {
f = &Filter{
DeleteExcluded: *deleteExcluded,
MinSize: int64(minSize),
MaxSize: int64(maxSize),
}
if *includeRule != "" {
err = f.Add(true, *includeRule)
if err != nil {
return nil, err
}
// Add implicit exclude
err = f.Add(false, "*")
if err != nil {
return nil, err
}
}
if *includeFrom != "" {
err := forEachLine(*includeFrom, func(line string) error {
return f.Add(true, line)
})
if err != nil {
return nil, err
}
// Add implicit exclude
err = f.Add(false, "*")
if err != nil {
return nil, err
}
}
if *excludeRule != "" {
err = f.Add(false, *excludeRule)
if err != nil {
return nil, err
}
}
if *excludeFrom != "" {
err := forEachLine(*excludeFrom, func(line string) error {
return f.Add(false, line)
})
if err != nil {
return nil, err
}
}
if *filterRule != "" {
err = f.AddRule(*filterRule)
if err != nil {
return nil, err
}
}
if *filterFrom != "" {
err := forEachLine(*filterFrom, f.AddRule)
if err != nil {
return nil, err
}
}
if *filesFrom != "" {
err := forEachLine(*filesFrom, func(line string) error {
return f.AddFile(line)
})
if err != nil {
return nil, err
}
}
if *dumpFilters {
fmt.Println("--- start filters ---")
fmt.Println(f.DumpFilters())
fmt.Println("--- end filters ---")
}
return f, nil
}
// Add adds a filter rule with include or exclude status indicated
func (f *Filter) Add(Include bool, glob string) error {
re, err := globToRegexp(glob)
if err != nil {
return err
}
rule := rule{
Include: Include,
Regexp: re,
}
f.rules = append(f.rules, rule)
return nil
}
// AddRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
// + glob
// - glob
// !
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func (f *Filter) AddRule(rule string) error {
switch {
case rule == "!":
f.Clear()
return nil
case strings.HasPrefix(rule, "- "):
return f.Add(false, rule[2:])
case strings.HasPrefix(rule, "+ "):
return f.Add(true, rule[2:])
}
return fmt.Errorf("Malformed rule %q", rule)
}
// AddFile adds a single file to the files from list
func (f *Filter) AddFile(file string) error {
if f.files == nil {
f.files = make(filesMap)
}
file = strings.Trim(file, "/")
f.files[file] = struct{}{}
return nil
}
// Clear clears all the filter rules
func (f *Filter) Clear() {
f.rules = nil
}
// Include returns whether this object should be included into the
// sync or not
func (f *Filter) Include(remote string, size int64) bool {
// filesFrom takes precedence
if f.files != nil {
_, include := f.files[remote]
return include
}
if f.MinSize != 0 && size < f.MinSize {
return false
}
if f.MaxSize != 0 && size > f.MaxSize {
return false
}
for _, rule := range f.rules {
if rule.Match(remote) {
return rule.Include
}
}
return true
}
// forEachLine calls fn on every line in the file pointed to by path
//
// It ignores empty lines and lines starting with '#' or ';'
func forEachLine(path string, fn func(string) error) (err error) {
in, err := os.Open(path)
if err != nil {
return err
}
defer checkClose(in, &err)
scanner := bufio.NewScanner(in)
for scanner.Scan() {
line := scanner.Text()
line = strings.TrimSpace(line)
if len(line) == 0 || line[0] == '#' || line[0] == ';' {
continue
}
err := fn(line)
if err != nil {
return err
}
}
return scanner.Err()
}
// DumpFilters dumps the filters in textual form, 1 per line
func (f *Filter) DumpFilters() string {
rules := []string{}
for _, rule := range f.rules {
rules = append(rules, rule.String())
}
return strings.Join(rules, "\n")
}
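Rules are evaluated in order and the first match wins, so the usual pattern ends with a catch-all "- *" exclude. A sketch, written as an example test in package fs:

package fs

import "fmt"

// Illustrative sketch: the first matching rule decides inclusion.
func ExampleFilter_Include() {
	f := &Filter{}
	for _, rule := range []string{"+ *.jpg", "- *"} {
		if err := f.AddRule(rule); err != nil {
			fmt.Println("bad rule:", err)
			return
		}
	}
	fmt.Println(f.Include("holiday/photo.jpg", 123))
	fmt.Println(f.Include("notes.txt", 123))
	// Output:
	// true
	// false
}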

fs/filter_test.go Normal file (324 lines)

@@ -0,0 +1,324 @@
package fs
import (
"io/ioutil"
"os"
"strings"
"testing"
)
func TestNewFilterDefault(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
if f.DeleteExcluded != false {
t.Errorf("DeleteExcluded want false got %v", f.DeleteExcluded)
}
if f.MinSize != 0 {
t.Errorf("MinSize want 0 got %v", f.MinSize)
}
if f.MaxSize != 0 {
t.Errorf("MaxSize want 0 got %v", f.MaxSize)
}
if len(f.rules) != 0 {
t.Errorf("rules want non got %v", f.rules)
}
if f.files != nil {
t.Errorf("files want none got %v", f.files)
}
}
// return a pointer to the string
func stringP(s string) *string {
return &s
}
// testFile creates a temp file with the contents
func testFile(t *testing.T, contents string) *string {
out, err := ioutil.TempFile("", "filter_test")
if err != nil {
t.Fatal(err)
}
defer func() {
err := out.Close()
if err != nil {
t.Error(err)
}
}()
_, err = out.Write([]byte(contents))
if err != nil {
t.Fatal(err)
}
s := out.Name()
return &s
}
func TestNewFilterFull(t *testing.T) {
mins := int64(100 * 1024)
maxs := int64(1000 * 1024)
emptyString := ""
isFalse := false
isTrue := true
// Set up the input
deleteExcluded = &isTrue
filterRule = stringP("- filter1")
filterFrom = testFile(t, "#comment\n+ filter2\n- filter3\n")
excludeRule = stringP("exclude1")
excludeFrom = testFile(t, "#comment\nexclude2\nexclude3\n")
includeRule = stringP("include1")
includeFrom = testFile(t, "#comment\ninclude2\ninclude3\n")
filesFrom = testFile(t, "#comment\nfiles1\nfiles2\n")
minSize = SizeSuffix(mins)
maxSize = SizeSuffix(maxs)
rm := func(p string) {
err := os.Remove(p)
if err != nil {
t.Logf("error removing %q: %v", p, err)
}
}
// Reset the input
defer func() {
rm(*filterFrom)
rm(*excludeFrom)
rm(*includeFrom)
rm(*filesFrom)
minSize = 0
maxSize = 0
deleteExcluded = &isFalse
filterRule = &emptyString
filterFrom = &emptyString
excludeRule = &emptyString
excludeFrom = &emptyString
includeRule = &emptyString
includeFrom = &emptyString
filesFrom = &emptyString
}()
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
if f.DeleteExcluded != true {
t.Errorf("DeleteExcluded want true got %v", f.DeleteExcluded)
}
if f.MinSize != mins {
t.Errorf("MinSize want %v got %v", mins, f.MinSize)
}
if f.MaxSize != maxs {
t.Errorf("MaxSize want %v got %v", maxs, f.MaxSize)
}
got := f.DumpFilters()
want := `+ (^|/)include1$
- (^|/)[^/]*$
+ (^|/)include2$
+ (^|/)include3$
- (^|/)[^/]*$
- (^|/)exclude1$
- (^|/)exclude2$
- (^|/)exclude3$
- (^|/)filter1$
+ (^|/)filter2$
- (^|/)filter3$`
if got != want {
t.Errorf("rules want %s got %s", want, got)
}
if len(f.files) != 2 {
t.Errorf("files want 2 got %v", f.files)
}
for _, name := range []string{"files1", "files2"} {
_, ok := f.files[name]
if !ok {
t.Errorf("Didn't find file %q in f.files", name)
}
}
}
type includeTest struct {
in string
size int64
want bool
}
func testInclude(t *testing.T, f *Filter, tests []includeTest) {
for _, test := range tests {
got := f.Include(test.in, test.size)
if test.want != got {
t.Errorf("%q,%d: want %v got %v", test.in, test.size, test.want, got)
}
}
}
func TestNewFilterIncludeFiles(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
err = f.AddFile("file1.jpg")
if err != nil {
t.Error(err)
}
err = f.AddFile("/file2.jpg")
if err != nil {
t.Error(err)
}
testInclude(t, f, []includeTest{
{"file1.jpg", 0, true},
{"file2.jpg", 1, true},
{"potato/file2.jpg", 2, false},
{"file3.jpg", 3, false},
})
}
func TestNewFilterMinSize(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
f.MinSize = 100
testInclude(t, f, []includeTest{
{"file1.jpg", 100, true},
{"file2.jpg", 101, true},
{"potato/file2.jpg", 99, false},
})
}
func TestNewFilterMaxSize(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
f.MaxSize = 100
testInclude(t, f, []includeTest{
{"file1.jpg", 100, true},
{"file2.jpg", 101, false},
{"potato/file2.jpg", 99, true},
})
}
func TestNewFilterMatches(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
add := func(s string) {
err := f.AddRule(s)
if err != nil {
t.Fatal(err)
}
}
add("+ cleared")
add("!")
add("- file1.jpg")
add("+ file2.png")
add("+ *.jpg")
add("- *.png")
add("- /potato")
add("+ /sausage1")
add("+ /sausage2*")
add("+ /sausage3**")
add("- *")
testInclude(t, f, []includeTest{
{"cleared", 100, false},
{"file1.jpg", 100, false},
{"file2.png", 100, true},
{"afile2.png", 100, false},
{"file3.jpg", 101, true},
{"file4.png", 101, false},
{"potato", 101, false},
{"sausage1", 101, true},
{"sausage1/potato", 101, false},
{"sausage2potato", 101, true},
{"sausage2/potato", 101, false},
{"sausage3/potato", 101, true},
{"unicorn", 99, false},
})
}
func TestFilterForEachLine(t *testing.T) {
file := testFile(t, `; comment
one
# another comment
two
# indented comment
three
four
five
six `)
defer func() {
err := os.Remove(*file)
if err != nil {
t.Error(err)
}
}()
lines := []string{}
err := forEachLine(*file, func(s string) error {
lines = append(lines, s)
return nil
})
if err != nil {
t.Error(err)
}
got := strings.Join(lines, ",")
want := "one,two,three,four,five,six"
if want != got {
t.Errorf("want %q got %q", want, got)
}
}
func TestFilterMatchesFromDocs(t *testing.T) {
for _, test := range []struct {
glob string
included bool
file string
}{
{"file.jpg", true, "file.jpg"},
{"file.jpg", true, "directory/file.jpg"},
{"file.jpg", false, "afile.jpg"},
{"file.jpg", false, "directory/afile.jpg"},
{"/file.jpg", true, "file.jpg"},
{"/file.jpg", false, "afile.jpg"},
{"/file.jpg", false, "directory/file.jpg"},
{"*.jpg", true, "file.jpg"},
{"*.jpg", true, "directory/file.jpg"},
{"*.jpg", false, "file.jpg/anotherfile.png"},
{"dir/**", true, "dir/file.jpg"},
{"dir/**", true, "dir/dir1/dir2/file.jpg"},
{"dir/**", false, "directory/file.jpg"},
{"dir/**", false, "adir/file.jpg"},
{"l?ss", true, "less"},
{"l?ss", true, "lass"},
{"l?ss", false, "floss"},
{"h[ae]llo", true, "hello"},
{"h[ae]llo", true, "hallo"},
{"h[ae]llo", false, "hullo"},
{"{one,two}_potato", true, "one_potato"},
{"{one,two}_potato", true, "two_potato"},
{"{one,two}_potato", false, "three_potato"},
{"{one,two}_potato", false, "_potato"},
{"\\*.jpg", true, "*.jpg"},
{"\\\\.jpg", true, "\\.jpg"},
{"\\[one\\].jpg", true, "[one].jpg"},
} {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
err = f.Add(true, test.glob)
if err != nil {
t.Fatal(err)
}
err = f.Add(false, "*")
if err != nil {
t.Fatal(err)
}
included := f.Include(test.file, 0)
if included != test.included {
t.Errorf("%q match %q: want %v got %v", test.glob, test.file, test.included, included)
}
}
}

fs/fs.go (155 lines changed)

@@ -1,23 +1,38 @@
// File system interface
// Package fs is a generic file system interface for rclone object storage systems
package fs
import (
"fmt"
"io"
"log"
"path/filepath"
"regexp"
"time"
)
// Constants
const (
// UserAgent for Fs which can set it
UserAgent = "rclone/" + Version
// ModTimeNotSupported is a very large precision value to show
// mod time isn't supported on this Fs
ModTimeNotSupported = 100 * 365 * 24 * time.Hour
)
// Globals
var (
// Filesystem registry
fsRegistry []*FsInfo
fsRegistry []*Info
// ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
ErrorNotFoundInConfigFile = fmt.Errorf("Didn't find section in config file")
ErrorCantCopy = fmt.Errorf("Can't copy object - incompatible remotes")
ErrorCantMove = fmt.Errorf("Can't move object - incompatible remotes")
ErrorCantDirMove = fmt.Errorf("Can't move directory - incompatible remotes")
ErrorDirExists = fmt.Errorf("Can't copy directory - destination already exists")
)
// Filesystem info
type FsInfo struct {
// Info information about a filesystem
type Info struct {
// Name of this fs
Name string
// Create a new file system. If root refers to an existing
@@ -30,7 +45,7 @@ type FsInfo struct {
Options []Option
}
// An options for a Fs
// Option describes an option for the config wizard
type Option struct {
Name string
Help string
@@ -38,7 +53,7 @@ type Option struct {
Examples []OptionExample
}
// An example for an option
// OptionExample describes an example for an Option
type OptionExample struct {
Value string
Help string
@@ -47,22 +62,28 @@ type OptionExample struct {
// Register a filesystem
//
// Fs modules should use this in an init() function
func Register(info *FsInfo) {
func Register(info *Info) {
fsRegistry = append(fsRegistry, info)
}
// A Filesystem, describes the local filesystem and the remote object store
// Fs is the interface a cloud storage system must provide
type Fs interface {
// Name of the remote (as passed into NewFs)
Name() string
// Root of the remote (as passed into NewFs)
Root() string
// String returns a description of the FS
String() string
// List the Fs into a channel
List() ObjectsChan
// List the Fs directories/buckets/containers into a channel
// ListDir lists the Fs directories/buckets/containers into a channel
ListDir() DirChan
// Find the Object at remote. Returns nil if can't be found
// NewFsObject finds the Object at remote. Returns nil if can't be found
NewFsObject(remote string) Object
// Put in to the remote path with the modTime given of the given size
@@ -72,18 +93,21 @@ type Fs interface {
// nil and the error
Put(in io.Reader, remote string, modTime time.Time, size int64) (Object, error)
// Make the directory (container, bucket)
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
Mkdir() error
// Remove the directory (container, bucket) if empty
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
Rmdir() error
// Precision of the ModTimes in this Fs
Precision() time.Duration
}
// A filesystem like object which can either be a remote object or a
// local file/directory
// Object is a filesystem like object provided by an Fs
type Object interface {
// String returns a description of the Object
String() string
@@ -95,9 +119,11 @@ type Object interface {
Remote() string
// Md5sum returns the md5 checksum of the file
// If no Md5sum is available it returns ""
Md5sum() (string, error)
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
ModTime() time.Time
// SetModTime sets the metadata on the object to set the modification date
@@ -119,30 +145,74 @@ type Object interface {
Remove() error
}
// Optional interfaces
// Purger is an optional interface for Fs
type Purger interface {
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
Purge() error
}
// A channel of Objects
// Copier is an optional interface for Fs
type Copier interface {
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
Copy(src Object, remote string) (Object, error)
}
// Mover is an optional interface for Fs
type Mover interface {
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
Move(src Object, remote string) (Object, error)
}
// DirMover is an optional interface for Fs
type DirMover interface {
// DirMove moves src to this remote using server side move
// operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
DirMove(src Fs) error
}
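Generic code discovers these optional interfaces with type assertions and falls back when a remote does not implement them, which is how Copy and MoveDir below decide between server side and manual operations. A minimal sketch:

package fs

// Illustrative sketch: use server side copy when available, else report
// ErrorCantCopy so the caller can fall back to streaming the data.
func exampleServerSideCopy(f Fs, src Object, remote string) (Object, error) {
	if copier, ok := f.(Copier); ok && src.Fs().Name() == f.Name() {
		return copier.Copy(src, remote)
	}
	return nil, ErrorCantCopy
}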
// ObjectsChan is a channel of Objects
type ObjectsChan chan Object
// A slice of Objects
// Objects is a slice of Object
type Objects []Object
// A pair of Objects
// ObjectPair is a pair of Objects used to describe a potential copy
// operation.
type ObjectPair struct {
src, dst Object
}
// A channel of ObjectPair
// ObjectPairChan is a channel of ObjectPair
type ObjectPairChan chan ObjectPair
// A structure of directory/container/bucket lists
// Dir describes a directory for directory/container/bucket lists
type Dir struct {
Name string // name of the directory
When time.Time // modification or creation time - IsZero for unknown
@@ -150,16 +220,13 @@ type Dir struct {
Count int64 // number of objects -1 for unknown
}
// A channel of Dir objects
// DirChan is a channel of Dir objects
type DirChan chan *Dir
// Pattern to match a url
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)
// Finds a FsInfo object for the name passed in
// Find looks for an Info object for the name passed in
//
// Services are looked up in the config file
func Find(name string) (*FsInfo, error) {
func Find(name string) (*Info, error) {
for _, item := range fsRegistry {
if item.Name == name {
return item, nil
@@ -168,53 +235,69 @@ func Find(name string) (*FsInfo, error) {
return nil, fmt.Errorf("Didn't find filing system for %q", name)
}
// Pattern to match an rclone url
var matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`)
// NewFs makes a new Fs object from the path
//
// The path is of the form service://path
// The path is of the form remote:path
//
// Services are looked up in the config file
// Remotes are looked up in the config file. If the remote isn't
// found then ErrorNotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(path string) (Fs, error) {
parts := matcher.FindStringSubmatch(path)
fsName, configName, fsPath := "local", "local", path
if parts != nil {
if parts != nil && !isDriveLetter(parts[1]) {
configName, fsPath = parts[1], parts[2]
var err error
fsName, err = ConfigFile.GetValue(configName, "type")
if err != nil {
return nil, fmt.Errorf("Didn't find section in config file for %q", configName)
return nil, ErrorNotFoundInConfigFile
}
}
fs, err := Find(fsName)
if err != nil {
return nil, err
}
// change native directory separators to / if there are any
fsPath = filepath.ToSlash(fsPath)
return fs.NewFs(configName, fsPath)
}
// Outputs log for object
// OutputLog logs for an object
func OutputLog(o interface{}, text string, args ...interface{}) {
description := ""
if x, ok := o.(fmt.Stringer); ok {
description = x.String() + ": "
if o != nil {
description = fmt.Sprintf("%v: ", o)
}
out := fmt.Sprintf(text, args...)
log.Print(description + out)
}
// Write debuging output for this Object or Fs
// Debug writes debugging output for this Object or Fs
func Debug(o interface{}, text string, args ...interface{}) {
if Config.Verbose {
OutputLog(o, text, args...)
}
}
// Write log output for this Object or Fs
// Log writes log output for this Object or Fs
func Log(o interface{}, text string, args ...interface{}) {
if !Config.Quiet {
OutputLog(o, text, args...)
}
}
// ErrorLog writes error log output for this Object or Fs. It
// unconditionally logs a message regardless of Config.Quiet or
// Config.Verbose.
func ErrorLog(o interface{}, text string, args ...interface{}) {
OutputLog(o, text, args...)
}
// checkClose is a utility function used to check the return from
// Close in a defer statement.
func checkClose(c io.Closer, err *error) {

fs/glob.go Normal file (117 lines)

@@ -0,0 +1,117 @@
// rsync style glob parser
package fs
import (
"bytes"
"fmt"
"regexp"
"strings"
)
// globToRegexp converts an rsync style glob to a regexp
//
// documented in filtering.md
func globToRegexp(glob string) (*regexp.Regexp, error) {
var re bytes.Buffer
if strings.HasPrefix(glob, "/") {
glob = glob[1:]
_, _ = re.WriteRune('^')
} else {
_, _ = re.WriteString("(^|/)")
}
consecutiveStars := 0
insertStars := func() error {
if consecutiveStars > 0 {
switch consecutiveStars {
case 1:
_, _ = re.WriteString(`[^/]*`)
case 2:
_, _ = re.WriteString(`.*`)
default:
return fmt.Errorf("too many stars in %q", glob)
}
}
consecutiveStars = 0
return nil
}
inBraces := false
inBrackets := 0
slashed := false
for _, c := range glob {
if slashed {
_, _ = re.WriteRune(c)
slashed = false
continue
}
if c != '*' {
err := insertStars()
if err != nil {
return nil, err
}
}
if inBrackets > 0 {
_, _ = re.WriteRune(c)
if c == '[' {
inBrackets++
}
if c == ']' {
inBrackets--
}
continue
}
switch c {
case '\\':
_, _ = re.WriteRune(c)
slashed = true
case '*':
consecutiveStars++
case '?':
_, _ = re.WriteString(`[^/]`)
case '[':
_, _ = re.WriteRune(c)
inBrackets++
case ']':
return nil, fmt.Errorf("mismatched ']' in glob %q", glob)
case '{':
if inBraces {
return nil, fmt.Errorf("can't nest '{' '}' in glob %q", glob)
}
inBraces = true
_, _ = re.WriteRune('(')
case '}':
if !inBraces {
return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
}
_, _ = re.WriteRune(')')
inBraces = false
case ',':
if inBraces {
_, _ = re.WriteRune('|')
} else {
_, _ = re.WriteRune(c)
}
case '.', '+', '(', ')', '|', '^', '$': // regexp meta characters not dealt with above
_, _ = re.WriteRune('\\')
_, _ = re.WriteRune(c)
default:
_, _ = re.WriteRune(c)
}
}
err := insertStars()
if err != nil {
return nil, err
}
if inBrackets > 0 {
return nil, fmt.Errorf("mismatched '[' and ']' in glob %q", glob)
}
if inBraces {
return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
}
_, _ = re.WriteRune('$')
result, err := regexp.Compile(re.String())
if err != nil {
return nil, fmt.Errorf("Bad glob pattern %q: %v (%q)", glob, err, re.String())
}
return result, nil
}
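A few sample conversions, agreeing with the cases in fs/glob_test.go below:

package fs

import "fmt"

// Illustrative sketch: anchored vs unanchored globs and the ** wildcard.
func exampleGlobToRegexp() {
	for _, glob := range []string{"*.jpg", "/file.jpg", "dir/**"} {
		re, err := globToRegexp(glob)
		if err != nil {
			fmt.Println(glob, "->", err)
			continue
		}
		fmt.Println(glob, "->", re.String())
	}
	// prints:
	//   *.jpg -> (^|/)[^/]*\.jpg$
	//   /file.jpg -> ^file\.jpg$
	//   dir/** -> (^|/)dir/.*$
}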

fs/glob_test.go Normal file (64 lines)

@@ -0,0 +1,64 @@
package fs
import (
"strings"
"testing"
)
func TestGlobToRegexp(t *testing.T) {
for _, test := range []struct {
in string
want string
error string
}{
{``, `(^|/)$`, ``},
{`potato`, `(^|/)potato$`, ``},
{`potato,sausage`, `(^|/)potato,sausage$`, ``},
{`/potato`, `^potato$`, ``},
{`potato?sausage`, `(^|/)potato[^/]sausage$`, ``},
{`potat[oa]`, `(^|/)potat[oa]$`, ``},
{`potat[a-z]or`, `(^|/)potat[a-z]or$`, ``},
{`potat[[:alpha:]]or`, `(^|/)potat[[:alpha:]]or$`, ``},
{`'.' '+' '(' ')' '|' '^' '$'`, `(^|/)'\.' '\+' '\(' '\)' '\|' '\^' '\$'$`, ``},
{`*.jpg`, `(^|/)[^/]*\.jpg$`, ``},
{`a{b,c,d}e`, `(^|/)a(b|c|d)e$`, ``},
{`potato**`, `(^|/)potato.*$`, ``},
{`potato**sausage`, `(^|/)potato.*sausage$`, ``},
{`*.p[lm]`, `(^|/)[^/]*\.p[lm]$`, ``},
{`[\[\]]`, `(^|/)[\[\]]$`, ``},
{`***potato`, `(^|/)`, `too many stars`},
{`***`, `(^|/)`, `too many stars`},
{`ab]c`, `(^|/)`, `mismatched ']'`},
{`ab[c`, `(^|/)`, `mismatched '[' and ']'`},
{`ab{{cd`, `(^|/)`, `can't nest`},
{`ab{}}cd`, `(^|/)`, `mismatched '{' and '}'`},
{`ab}c`, `(^|/)`, `mismatched '{' and '}'`},
{`ab{c`, `(^|/)`, `mismatched '{' and '}'`},
{`*.{jpg,png,gif}`, `(^|/)[^/]*\.(jpg|png|gif)$`, ``},
{`[a--b]`, `(^|/)`, `Bad glob pattern`},
{`a\*b`, `(^|/)a\*b$`, ``},
{`a\\b`, `(^|/)a\\b$`, ``},
} {
gotRe, err := globToRegexp(test.in)
if test.error == "" {
if err != nil {
t.Errorf("%q: not expecting error: %v", test.in, err)
} else {
got := gotRe.String()
if test.want != got {
t.Errorf("%q: want %q got %q", test.in, test.want, got)
}
}
} else {
if err == nil {
t.Errorf("%q: expecting error but didn't get one", test.in)
} else {
got := err.Error()
if !strings.Contains(got, test.error) {
t.Errorf("%q: want error %q got %q", test.in, test.error, got)
}
}
}
}
}


@@ -6,7 +6,8 @@ import (
"time"
)
// This defines a Limited Fs which can only return the Objects passed in from the Fs passed in
// Limited defines a Fs which can only return the Objects passed in
// from the Fs passed in
type Limited struct {
objects []Object
fs Fs
@@ -21,6 +22,16 @@ func NewLimited(fs Fs, objects ...Object) Fs {
return f
}
// Name is name of the remote (as passed into NewFs)
func (f *Limited) Name() string {
return f.fs.Name() // return name of underlying remote
}
// Root is the root of the remote (as passed into NewFs)
func (f *Limited) Root() string {
return f.fs.Root() // return root of underlying remote
}
// String returns a description of the FS
func (f *Limited) String() string {
return fmt.Sprintf("%s limited to %d objects", f.fs.String(), len(f.objects))
@@ -38,14 +49,14 @@ func (f *Limited) List() ObjectsChan {
return out
}
// List the Fs directories/buckets/containers into a channel
// ListDir lists the Fs directories/buckets/containers into a channel
func (f *Limited) ListDir() DirChan {
out := make(DirChan, Config.Checkers)
close(out)
return out
}
// Find the Object at remote. Returns nil if can't be found
// NewFsObject finds the Object at remote. Returns nil if can't be found
func (f *Limited) NewFsObject(remote string) Object {
for _, obj := range f.objects {
if obj.Remote() == remote {
@@ -68,15 +79,16 @@ func (f *Limited) Put(in io.Reader, remote string, modTime time.Time, size int64
return obj, obj.Update(in, modTime, size)
}
// Make the directory (container, bucket)
// Mkdir makes the directory (container, bucket)
func (f *Limited) Mkdir() error {
// All directories are already made - just ignore
return nil
}
// Remove the directory (container, bucket) if empty
// Rmdir removes the directory (container, bucket) if empty
func (f *Limited) Rmdir() error {
return fmt.Errorf("Can't rmdir in limited fs")
// Ignore this in a limited fs
return nil
}
// Precision of the ModTimes in this Fs
@@ -84,5 +96,23 @@ func (f *Limited) Precision() time.Duration {
return f.fs.Precision()
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Limited) Copy(src Object, remote string) (Object, error) {
fCopy, ok := f.fs.(Copier)
if !ok {
return nil, ErrorCantCopy
}
return fCopy.Copy(src, remote)
}
// Check the interfaces are satisfied
var _ Fs = &Limited{}
var _ Copier = &Limited{}

fs/loghttp.go Normal file (57 lines)

@@ -0,0 +1,57 @@
// A logging http transport
package fs
import (
"log"
"net/http"
"net/http/httputil"
)
const (
separatorReq = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
separatorResp = "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
)
// LoggedTransport is an http transport which logs the traffic
type LoggedTransport struct {
wrapped http.RoundTripper
logBody bool
}
// NewLoggedTransport wraps the transport passed in and logs all roundtrips
// including the body if logBody is set.
func NewLoggedTransport(transport http.RoundTripper, logBody bool) *LoggedTransport {
return &LoggedTransport{
wrapped: transport,
logBody: logBody,
}
}
// CancelRequest cancels an in-flight request by closing its
// connection. CancelRequest should only be called after RoundTrip has
// returned.
func (t *LoggedTransport) CancelRequest(req *http.Request) {
if wrapped, ok := t.wrapped.(interface {
CancelRequest(*http.Request)
}); ok {
log.Printf("CANCEL REQUEST %v", req)
wrapped.CancelRequest(req)
}
}
// RoundTrip implements the RoundTripper interface.
func (t *LoggedTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
buf, _ := httputil.DumpRequestOut(req, t.logBody)
log.Println(separatorReq)
log.Println("HTTP REQUEST")
log.Println(string(buf))
log.Println(separatorReq)
resp, err = t.wrapped.RoundTrip(req)
if err != nil {
	// no response to dump - pass the error straight back
	return resp, err
}
buf, _ = httputil.DumpResponse(resp, t.logBody)
log.Println(separatorResp)
log.Println("HTTP RESPONSE")
log.Println(string(buf))
log.Println(separatorResp)
return resp, err
}
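Any RoundTripper can be wrapped this way; ConfigInfo.Transport does it when -dump-headers or -dump-bodies is set. A sketch of standalone use:

package fs

import "net/http"

// Illustrative sketch: an http.Client whose traffic is logged.
func exampleLoggedClient(logBody bool) *http.Client {
	return &http.Client{
		Transport: NewLoggedTransport(http.DefaultTransport, logBody),
	}
}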


@@ -4,11 +4,16 @@ package fs
import (
"fmt"
"log"
"io"
"mime"
"path"
"sync"
"sync/atomic"
"time"
)
// Work out modify window for fses passed in - sets Config.ModifyWindow
// CalculateModifyWindow works out modify window for Fses passed in -
// sets Config.ModifyWindow
//
// This is the largest modify window of all the fses in use, and the
// user configured value
@@ -19,47 +24,69 @@ func CalculateModifyWindow(fs ...Fs) {
if precision > Config.ModifyWindow {
Config.ModifyWindow = precision
}
if precision == ModTimeNotSupported {
Debug(f, "Modify window not supported")
return
}
}
}
Debug(fs[0], "Modify window is %s\n", Config.ModifyWindow)
Debug(fs[0], "Modify window is %s", Config.ModifyWindow)
}
// Check the two files to see if the MD5sums are the same
// Md5sumsEqual checks to see if src == dst, but ignores empty strings
func Md5sumsEqual(src, dst string) bool {
if src == "" || dst == "" {
return true
}
return src == dst
}
// CheckMd5sums checks the two files to see if the MD5sums are the same
//
// Returns two bools, the first of which is equality and the second of
// which is true if either of the MD5SUMs were unset.
//
// May return an error which will already have been logged
//
// If an error is returned it will return false
func CheckMd5sums(src, dst Object) (bool, error) {
// If an error is returned it will return equal as false
func CheckMd5sums(src, dst Object) (equal bool, unset bool, err error) {
srcMd5, err := src.Md5sum()
if err != nil {
Stats.Error()
Log(src, "Failed to calculate src md5: %s", err)
return false, err
ErrorLog(src, "Failed to calculate src md5: %s", err)
return false, false, err
}
if srcMd5 == "" {
return true, true, nil
}
dstMd5, err := dst.Md5sum()
if err != nil {
Stats.Error()
Log(dst, "Failed to calculate dst md5: %s", err)
return false, err
ErrorLog(dst, "Failed to calculate dst md5: %s", err)
return false, false, err
}
if dstMd5 == "" {
return true, true, nil
}
// Debug("Src MD5 %s", srcMd5)
// Debug("Dst MD5 %s", obj.Hash)
return srcMd5 == dstMd5, nil
return Md5sumsEqual(srcMd5, dstMd5), false, nil
}
// Checks to see if the src and dst objects are equal by looking at
// Equal checks to see if the src and dst objects are equal by looking at
// size, mtime and MD5SUM
//
// If the src and dst size are different then it is considered to be
// not equal.
// not equal. If --size-only is in effect then this is the only check
// that is done.
//
// If the size is the same and the mtime is the same then it is
// considered to be equal. This is the heuristic rsync uses when
// not using --checksum.
// considered to be equal. This check is skipped if using --checksum.
//
// If the size is the same and and mtime is different or unreadable
// and the MD5SUM is the same then the file is considered to be equal.
// In this case the mtime on the dst is updated.
// If the size is the same and mtime is different, unreadable or
// --checksum is set and the MD5SUM is the same then the file is
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
@@ -68,32 +95,74 @@ func Equal(src, dst Object) bool {
Debug(src, "Sizes differ")
return false
}
// Size the same so check the mtime
srcModTime := src.ModTime()
dstModTime := dst.ModTime()
dt := dstModTime.Sub(srcModTime)
ModifyWindow := Config.ModifyWindow
if dt >= ModifyWindow || dt <= -ModifyWindow {
Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
} else {
Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
if Config.SizeOnly {
Debug(src, "Sizes identical")
return true
}
var srcModTime time.Time
if !Config.CheckSum {
if Config.ModifyWindow == ModTimeNotSupported {
Debug(src, "Sizes identical")
return true
}
// Size the same so check the mtime
srcModTime = src.ModTime()
dstModTime := dst.ModTime()
dt := dstModTime.Sub(srcModTime)
ModifyWindow := Config.ModifyWindow
if dt >= ModifyWindow || dt <= -ModifyWindow {
Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
} else {
Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
return true
}
}
// mtime is unreadable or different but size is the same so
// check the MD5SUM
same, _ := CheckMd5sums(src, dst)
same, md5unset, _ := CheckMd5sums(src, dst)
if !same {
Debug(src, "Md5sums differ")
return false
}
// Size and MD5 the same but mtime different so update the
// mtime of the dst object here
dst.SetModTime(srcModTime)
if !Config.CheckSum {
// Size and MD5 the same but mtime different so update the
// mtime of the dst object here
dst.SetModTime(srcModTime)
}
Debug(src, "Size and MD5SUM of src and dst objects identical")
if md5unset {
Debug(src, "Size of src and dst objects identical")
} else {
Debug(src, "Size and MD5SUM of src and dst objects identical")
}
return true
}
// MimeType returns a guess at the mime type from the extension
func MimeType(o Object) string {
mimeType := mime.TypeByExtension(path.Ext(o.Remote()))
if mimeType == "" {
mimeType = "application/octet-stream"
}
return mimeType
}
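The guess is made purely from the file extension. A sketch of the underlying lookup (output can vary with the platform's mime tables, so no fixed result is claimed):

package fs

import (
	"fmt"
	"mime"
	"path"
)

// Illustrative sketch of the lookup MimeType performs.
func ExampleMimeType() {
	for _, name := range []string{"photo.jpg", "archive.unknownext"} {
		t := mime.TypeByExtension(path.Ext(name))
		if t == "" {
			t = "application/octet-stream"
		}
		fmt.Println(name, "->", t)
	}
}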
// Used to remove a failed copy
//
// Returns whether the file was successfully removed or not
func removeFailedCopy(dst Object) bool {
if dst == nil {
return false
}
Debug(dst, "Removing failed copy")
removeErr := dst.Remove()
if removeErr != nil {
Debug(dst, "Failed to remove failed copy: %s", removeErr)
return false
}
return true
}
@@ -103,39 +172,100 @@ func Equal(src, dst Object) bool {
// call Copy() with dst nil on a pre-existing file then some filing
// systems (eg Drive) may duplicate the file.
func Copy(f Fs, dst, src Object) {
in0, err := src.Open()
if err != nil {
Stats.Error()
Log(src, "Failed to open: %s", err)
return
}
in := NewAccount(in0) // account the transfer
var actionTaken string
if dst != nil {
actionTaken = "Copied (updated existing)"
err = dst.Update(in, src.ModTime(), src.Size())
const maxTries = 10
tries := 0
doUpdate := dst != nil
var err, inErr error
tryAgain:
// Try server side copy first - if has optional interface and
// is same underlying remote
actionTaken := "Copied (server side copy)"
if fCopy, ok := f.(Copier); ok && src.Fs().Name() == f.Name() {
var newDst Object
newDst, err = fCopy.Copy(src, src.Remote())
if err == nil {
dst = newDst
}
} else {
actionTaken = "Copied (new)"
dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
err = ErrorCantCopy
}
// If can't server side copy, do it manually
if err == ErrorCantCopy {
var in0 io.ReadCloser
in0, err = src.Open()
if err != nil {
Stats.Error()
ErrorLog(src, "Failed to open: %s", err)
return
}
// On big files add a buffer
if src.Size() > 10<<20 {
in0, _ = newAsyncReader(in0, 4, 4<<20)
}
in := NewAccount(in0, src) // account the transfer
if doUpdate {
actionTaken = "Copied (updated existing)"
err = dst.Update(in, src.ModTime(), src.Size())
} else {
actionTaken = "Copied (new)"
dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
}
inErr = in.Close()
}
// Retry if err returned a retry error
if r, ok := err.(Retry); ok && r.Retry() && tries < maxTries {
tries++
Log(src, "Received error: %v - retrying %d/%d", err, tries, maxTries)
if removeFailedCopy(dst) {
// If we removed dst, then nil it out and note we are not updating
dst = nil
doUpdate = false
}
goto tryAgain
}
inErr := in.Close()
if err == nil {
err = inErr
}
if err != nil {
Stats.Error()
Log(src, "Failed to copy: %s", err)
if dst != nil {
Debug(dst, "Removing failed copy")
removeErr := dst.Remove()
if removeErr != nil {
Stats.Error()
Log(dst, "Failed to remove failed copy: %s", removeErr)
}
}
ErrorLog(src, "Failed to copy: %s", err)
removeFailedCopy(dst)
return
}
// Verify sizes are the same after transfer
if src.Size() != dst.Size() {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
ErrorLog(dst, "%s", err)
removeFailedCopy(dst)
return
}
// Verify md5sums are the same after transfer - ignoring blank md5sums
if !Config.SizeOnly {
srcMd5sum, md5sumErr := src.Md5sum()
if md5sumErr != nil {
Stats.Error()
ErrorLog(src, "Failed to read md5sum: %s", md5sumErr)
} else if srcMd5sum != "" {
dstMd5sum, md5sumErr := dst.Md5sum()
if md5sumErr != nil {
Stats.Error()
ErrorLog(dst, "Failed to read md5sum: %s", md5sumErr)
} else if !Md5sumsEqual(srcMd5sum, dstMd5sum) {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
ErrorLog(dst, "%s", err)
removeFailedCopy(dst)
return
}
}
}
Debug(src, actionTaken)
}
@@ -159,7 +289,7 @@ func checkOne(pair ObjectPair, out ObjectPairChan) {
out <- pair
}
// Read FsObjects~s on in send to out if they need uploading
// PairChecker reads Objects on in and sends them to out if they need transferring.
//
// FIXME potentially doing lots of MD5SUMS at once
func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
@@ -172,27 +302,64 @@ func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
}
}
// Read FsObjects on in and copy them
func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
// PairCopier reads Objects on in and copies them.
func PairCopier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
defer wg.Done()
for pair := range in {
src := pair.src
Stats.Transferring(src)
Copy(fdst, pair.dst, src)
if Config.DryRun {
Debug(src, "Not copying as --dry-run")
} else {
Copy(fdst, pair.dst, src)
}
Stats.DoneTransferring(src)
}
}
// Delete all the files passed in the channel
func DeleteFiles(to_be_deleted ObjectsChan) {
// PairMover reads Objects on in and moves them if possible, or copies
// them if not
func PairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
defer wg.Done()
// See if we have Move available
fdstMover, haveMover := fdst.(Mover)
for pair := range in {
src := pair.src
dst := pair.dst
Stats.Transferring(src)
if Config.DryRun {
Debug(src, "Not moving as --dry-run")
} else if haveMover {
// Delete destination if it exists
if pair.dst != nil {
err := dst.Remove()
if err != nil {
Stats.Error()
ErrorLog(dst, "Couldn't delete: %v", err)
}
}
_, err := fdstMover.Move(src, src.Remote())
if err != nil {
Stats.Error()
ErrorLog(dst, "Couldn't move: %v", err)
} else {
Debug(src, "Moved")
}
} else {
Copy(fdst, pair.dst, src)
}
Stats.DoneTransferring(src)
}
}
// DeleteFiles removes all the files passed in the channel
func DeleteFiles(toBeDeleted ObjectsChan) {
var wg sync.WaitGroup
wg.Add(Config.Transfers)
var fs Fs
for i := 0; i < Config.Transfers; i++ {
go func() {
defer wg.Done()
for dst := range to_be_deleted {
fs = dst.Fs()
for dst := range toBeDeleted {
if Config.DryRun {
Debug(dst, "Not deleting as --dry-run")
} else {
@@ -201,7 +368,7 @@ func DeleteFiles(to_be_deleted ObjectsChan) {
Stats.DoneChecking(dst)
if err != nil {
Stats.Error()
Log(dst, "Couldn't delete: %s", err)
ErrorLog(dst, "Couldn't delete: %s", err)
} else {
Debug(dst, "Deleted")
}
@@ -209,15 +376,40 @@ func DeleteFiles(to_be_deleted ObjectsChan) {
}
}()
}
Log(fs, "Waiting for deletions to finish")
Log(nil, "Waiting for deletions to finish")
wg.Wait()
}
// readFilesMap builds a map of Object.Remote to Object for the given Fs
func readFilesMap(fs Fs) map[string]Object {
files := make(map[string]Object)
for o := range fs.List() {
remote := o.Remote()
if _, ok := files[remote]; !ok {
files[remote] = o
} else {
Log(o, "Duplicate file detected")
}
}
return files
}
// Same returns true if fdst and fsrc point to the same underlying Fs
func Same(fdst, fsrc Fs) bool {
return fdst.Name() == fsrc.Name() && fdst.Root() == fsrc.Root()
}
// Syncs fsrc into fdst
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc
func Sync(fdst, fsrc Fs, Delete bool) error {
//
// If DoMove is true then files will be moved instead of copied
func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
if Same(fdst, fsrc) {
ErrorLog(fdst, "Nothing to do as source and destination are the same")
return nil
}
err := fdst.Mkdir()
if err != nil {
Stats.Error()
@@ -228,52 +420,60 @@ func Sync(fdst, fsrc Fs, Delete bool) error {
// Read the destination files first
// FIXME could do this in parallel and make it use less memory
delFiles := make(map[string]Object)
for dst := range fdst.List() {
delFiles[dst.Remote()] = dst
}
delFiles := readFilesMap(fdst)
// Read source files checking them off against dest files
to_be_checked := make(ObjectPairChan, Config.Transfers)
to_be_uploaded := make(ObjectPairChan, Config.Transfers)
toBeChecked := make(ObjectPairChan, Config.Transfers)
toBeUploaded := make(ObjectPairChan, Config.Transfers)
var checkerWg sync.WaitGroup
checkerWg.Add(Config.Checkers)
for i := 0; i < Config.Checkers; i++ {
go PairChecker(to_be_checked, to_be_uploaded, &checkerWg)
go PairChecker(toBeChecked, toBeUploaded, &checkerWg)
}
var copierWg sync.WaitGroup
copierWg.Add(Config.Transfers)
for i := 0; i < Config.Transfers; i++ {
go Copier(to_be_uploaded, fdst, &copierWg)
if DoMove {
go PairMover(toBeUploaded, fdst, &copierWg)
} else {
go PairCopier(toBeUploaded, fdst, &copierWg)
}
}
go func() {
for src := range fsrc.List() {
remote := src.Remote()
dst, found := delFiles[remote]
if found {
delete(delFiles, remote)
to_be_checked <- ObjectPair{src, dst}
dst, dstFound := delFiles[remote]
if !Config.Filter.Include(remote, src.Size()) {
Debug(src, "Excluding from sync")
if dstFound && !Config.Filter.DeleteExcluded {
delete(delFiles, remote)
}
} else {
if dstFound {
delete(delFiles, remote)
toBeChecked <- ObjectPair{src, dst}
} else {
// No need to check since doesn't exist
toBeUploaded <- ObjectPair{src, nil}
}
}
}
close(toBeChecked)
}()
Log(fdst, "Waiting for checks to finish")
checkerWg.Wait()
close(toBeUploaded)
Log(fdst, "Waiting for transfers to finish")
copierWg.Wait()
// Delete files if asked
if Delete {
if Stats.Errored() {
Log(fdst, "Not deleting files as there were IO errors")
ErrorLog(fdst, "Not deleting files as there were IO errors")
return nil
}
// Delete the spare files
toDelete := make(ObjectsChan, Config.Transfers)
go func() {
for _, fs := range delFiles {
toDelete <- fs
}
close(toDelete)
}()
DeleteFiles(toDelete)
}
return nil
}
// Sync fsrc into fdst
func Sync(fdst, fsrc Fs) error {
return syncCopyMove(fdst, fsrc, true, false)
}
// CopyDir copies fsrc into fdst
func CopyDir(fdst, fsrc Fs) error {
return syncCopyMove(fdst, fsrc, false, false)
}
// MoveDir moves fsrc into fdst
func MoveDir(fdst, fsrc Fs) error {
if Same(fdst, fsrc) {
ErrorLog(fdst, "Nothing to do as source and destination are the same")
return nil
}
// First attempt to use DirMover
if fdstDirMover, ok := fdst.(DirMover); ok && fsrc.Name() == fdst.Name() {
err := fdstDirMover.DirMove(fsrc)
Debug(fdst, "Using server side directory move")
switch err {
case ErrorCantDirMove, ErrorDirExists:
Debug(fdst, "Server side directory move failed - fallback to copy/delete: %v", err)
case nil:
Debug(fdst, "Server side directory move succeeded")
return nil
default:
Stats.Error()
ErrorLog(fdst, "Server side directory move failed: %v", err)
return err
}
}
// Now move the files
err := syncCopyMove(fdst, fsrc, false, true)
if err != nil || Stats.Errored() {
ErrorLog(fdst, "Not deleting files as there were IO errors")
return err
}
return Purge(fsrc)
}
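// Illustrative usage (not part of this commit): the three public entry
// points differ only in the Delete and DoMove flags they pass to
// syncCopyMove. A minimal sketch from a caller's point of view, assuming
// fdst and fsrc were opened with fs.NewFs:
//
//	if err := fs.Sync(fdst, fsrc); err != nil { // copy, then delete extras in fdst
//		log.Fatalf("Sync failed: %v", err)
//	}
//	if err := fs.CopyDir(fdst, fsrc); err != nil { // copy only, never delete
//		log.Fatalf("CopyDir failed: %v", err)
//	}
//	if err := fs.MoveDir(fdst, fsrc); err != nil { // server side move when supported
//		log.Fatalf("MoveDir failed: %v", err)
//	}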
// Check the files in fsrc and fdst according to Size and MD5SUM
func Check(fdst, fsrc Fs) error {
Log(fdst, "Building file list")
// Read the destination files first
// FIXME could do this in parallel and make it use less memory
dstFiles := readFilesMap(fdst)
// Read the source files checking them against dstFiles
// FIXME could do this in parallel and make it use less memory
srcFiles := readFilesMap(fsrc)
// Move all the common files into commonFiles and delete them
// from srcFiles and dstFiles
commonFiles := make(map[string][]Object)
for remote, src := range srcFiles {
if dst, ok := dstFiles[remote]; ok {
commonFiles[remote] = []Object{dst, src}
delete(srcFiles, remote)
delete(dstFiles, remote)
}
}
Log(fdst, "%d files not in %v", len(dstFiles), fsrc)
for _, dst := range dstFiles {
Stats.Error()
Log(dst, "File not in %v", fsrc)
ErrorLog(dst, "File not in %v", fsrc)
}
Log(fsrc, "%d files not in %s", len(srcFiles), fdst)
for _, src := range srcFiles {
Stats.Error()
Log(src, "File not in %v", fdst)
ErrorLog(src, "File not in %v", fdst)
}
checks := make(chan []Object, Config.Transfers)
go func() {
for _, check := range commonFiles {
checks <- check
}
close(checks)
}()
var checkerWg sync.WaitGroup
checkerWg.Add(Config.Checkers)
for i := 0; i < Config.Checkers; i++ {
go func() {
defer checkerWg.Done()
for check := range checks {
dst, src := check[0], check[1]
Stats.Checking(src)
if src.Size() != dst.Size() {
Stats.DoneChecking(src)
Stats.Error()
Log(src, "Sizes differ")
ErrorLog(src, "Sizes differ")
continue
}
same, _, err := CheckMd5sums(src, dst)
Stats.DoneChecking(src)
if err != nil {
continue
}
if !same {
Stats.Error()
Log(src, "Md5sums differ")
ErrorLog(src, "Md5sums differ")
}
Debug(src, "OK")
}
}()
}
Log(fdst, "Waiting for checks to finish")
checkerWg.Wait()
Log(fdst, "%d differences found", Stats.GetErrors())
if Stats.GetErrors() > 0 {
return fmt.Errorf("%d differences found", Stats.GetErrors())
}
return nil
}
// ListFn lists the Fs to the supplied function
//
// Lists in parallel which may get them out of order
func ListFn(f Fs, fn func(Object)) error {
in := f.List()
var wg sync.WaitGroup
wg.Add(Config.Checkers)
for i := 0; i < Config.Checkers; i++ {
go func() {
defer wg.Done()
for o := range in {
fn(o)
}
}()
}
wg.Wait()
return nil
}
// mutex for synchronized output
var outMutex sync.Mutex
// Synchronized fmt.Fprintf
//
// Ignores errors from Fprintf
func syncFprintf(w io.Writer, format string, a ...interface{}) {
outMutex.Lock()
defer outMutex.Unlock()
_, _ = fmt.Fprintf(w, format, a...)
}
// List the Fs to the supplied writer
//
// Shows size and path
//
// Lists in parallel which may get them out of order
func List(f Fs, w io.Writer) error {
return ListFn(f, func(o Object) {
syncFprintf(w, "%9d %s\n", o.Size(), o.Remote())
})
}
// ListLong lists the Fs to the supplied writer
//
// Shows size, mod time and path
//
// Lists in parallel which may get them out of order
func ListLong(f Fs, w io.Writer) error {
return ListFn(f, func(o Object) {
Stats.Checking(o)
modTime := o.ModTime()
Stats.DoneChecking(o)
syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote())
})
}
// Md5sum lists the Fs to the supplied writer
//
// Produces the same output as the md5sum command
//
// Lists in parallel which may get them out of order
func Md5sum(f Fs, w io.Writer) error {
return ListFn(f, func(o Object) {
Stats.Checking(o)
md5sum, err := o.Md5sum()
Stats.DoneChecking(o)
if err != nil {
Debug(o, "Failed to read MD5: %v", err)
md5sum = "ERROR"
}
syncFprintf(w, "%32s %s\n", md5sum, o.Remote())
})
}
// Count counts the objects and their sizes in the Fs
func Count(f Fs) (objects int64, size int64, err error) {
err = ListFn(f, func(o Object) {
atomic.AddInt64(&objects, 1)
atomic.AddInt64(&size, o.Size())
})
return
}
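// Illustrative usage (not part of this commit): since the listing helpers
// write to any io.Writer, their output can be captured rather than printed —
// a minimal sketch assuming f is an open fs.Fs:
//
//	var buf bytes.Buffer
//	if err := fs.List(f, &buf); err != nil {
//		log.Fatalf("List failed: %v", err)
//	}
//	fmt.Print(buf.String())
//
//	objects, size, err := fs.Count(f)
//	if err == nil {
//		fmt.Printf("%d objects, %d bytes\n", objects, size)
//	}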
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(f Fs, w io.Writer) error {
for dir := range f.ListDir() {
fmt.Printf("%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
syncFprintf(w, "%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
}
return nil
}
// Mkdir makes a destination directory or container
func Mkdir(f Fs) error {
err := f.Mkdir()
if err != nil {
Stats.Error()
return err
}
return nil
}
// Rmdir removes a container but not if not empty
func Rmdir(f Fs) error {
if Config.DryRun {
Log(f, "Not deleting as dry run is set")
return nil
}
err := f.Rmdir()
if err != nil {
Stats.Error()
return err
}
return nil
}
// Purge removes a container and all of its contents
//
// FIXME doesn't delete local directories
func Purge(f Fs) error {
var err error
if purger, ok := f.(Purger); ok {
if Config.DryRun {
Debug(f, "Not purging as --dry-run set")
} else {
err = purger.Purge()
}
} else {
// DeleteFiles and Rmdir observe --dry-run
DeleteFiles(f.List())
log.Printf("Deleting path")
Rmdir(f)
err = Rmdir(f)
}
if err != nil {
Stats.Error()
return err
}
return nil
}

fs/operations_test.go Normal file
// Test rclone by doing real transactions to a storage provider to and
// from the local disk
package fs_test
import (
"bytes"
"flag"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest"
// Active file systems
_ "github.com/ncw/rclone/amazonclouddrive"
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/onedrive"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
)
// Globals
var (
localName, remoteName string
flocal, fremote fs.Fs
RemoteName = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
SubDir = flag.Bool("subdir", false, "Set to test with a sub directory")
Verbose = flag.Bool("verbose", false, "Set to enable logging")
finalise func()
)
// Write a file
func WriteFile(filePath, content string, t time.Time) {
// FIXME make directories?
filePath = path.Join(localName, filePath)
dirPath := path.Dir(filePath)
err := os.MkdirAll(dirPath, 0770)
if err != nil {
log.Fatalf("Failed to make directories %q: %v", dirPath, err)
}
err = ioutil.WriteFile(filePath, []byte(content), 0600)
if err != nil {
log.Fatalf("Failed to write file %q: %v", filePath, err)
}
err = os.Chtimes(filePath, t, t)
if err != nil {
log.Fatalf("Failed to chtimes file %q: %v", filePath, err)
}
}
var t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
var t2 = fstest.Time("2011-12-25T12:59:59.123456789Z")
var t3 = fstest.Time("2011-12-30T12:59:59.000000000Z")
func TestInit(t *testing.T) {
fs.LoadConfig()
fs.Config.Verbose = *Verbose
fs.Config.Quiet = !*Verbose
var err error
fremote, finalise, err = fstest.RandomRemote(*RemoteName, *SubDir)
if err != nil {
t.Fatalf("Failed to open remote %q: %v", *RemoteName, err)
}
t.Logf("Testing with remote %v", fremote)
localName, err = ioutil.TempDir("", "rclone")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
localName = filepath.ToSlash(localName)
t.Logf("Testing with local %q", localName)
flocal, err = fs.NewFs(localName)
if err != nil {
t.Fatalf("Failed to make %q: %v", remoteName, err)
}
}
func TestCalculateModifyWindow(t *testing.T) {
fs.CalculateModifyWindow(fremote, flocal)
t.Logf("ModifyWindow is %q", fs.Config.ModifyWindow)
}
func TestMkdir(t *testing.T) {
fstest.TestMkdir(t, fremote)
}
// Check dry run is working
func TestCopyWithDryRun(t *testing.T) {
WriteFile("sub dir/hello world", "hello world", t1)
fs.Config.DryRun = true
err := fs.CopyDir(fremote, flocal)
fs.Config.DryRun = false
if err != nil {
t.Fatalf("Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, []fstest.Item{}, fs.Config.ModifyWindow)
}
// Now without dry run
func TestCopy(t *testing.T) {
err := fs.CopyDir(fremote, flocal)
if err != nil {
t.Fatalf("Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Test a server side copy if possible, or the backup path if not
func TestServerSideCopy(t *testing.T) {
fremoteCopy, finaliseCopy, err := fstest.RandomRemote(*RemoteName, *SubDir)
if err != nil {
t.Fatalf("Failed to open remote copy %q: %v", *RemoteName, err)
}
defer finaliseCopy()
t.Logf("Server side copy (if possible) %v -> %v", fremote, fremoteCopy)
err = fs.CopyDir(fremoteCopy, fremote)
if err != nil {
t.Fatalf("Server Side Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremoteCopy, items, fs.Config.ModifyWindow)
}
func TestLsd(t *testing.T) {
var buf bytes.Buffer
err := fs.ListDir(fremote, &buf)
if err != nil {
t.Fatalf("ListDir failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, "sub dir\n") {
t.Fatalf("Result wrong %q", res)
}
}
// Now delete the local file and download it
func TestCopyAfterDelete(t *testing.T) {
err := os.Remove(localName + "/sub dir/hello world")
if err != nil {
t.Fatalf("Remove failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, []fstest.Item{}, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestCopyRedownload(t *testing.T) {
err := fs.CopyDir(flocal, fremote)
if err != nil {
t.Fatalf("Copy failed: %v", err)
}
items := []fstest.Item{
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
// Clean the directory
cleanTempDir(t)
}
// Create a file and sync it. Change the last modified date and resync.
// If we're only doing sync by size and checksum, we expect nothing
// to be transferred on the second sync.
func TestSyncBasedOnCheckSum(t *testing.T) {
cleanTempDir(t)
fs.Config.CheckSum = true
defer func() { fs.Config.CheckSum = false }()
WriteFile("check sum", "", t1)
local_items := []fstest.Item{
{Path: "check sum", Size: 0, ModTime: t1, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fs.Stats.ResetCounters()
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Initial sync failed: %v", err)
}
// We should have transferred exactly one file.
if fs.Stats.GetTransfers() != 1 {
t.Fatalf("Sync 1: want 1 transfer, got %d", fs.Stats.GetTransfers())
}
remote_items := local_items
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)
err = os.Chtimes(localName+"/check sum", t2, t2)
if err != nil {
t.Fatalf("Chtimes failed: %v", err)
}
local_items = []fstest.Item{
{Path: "check sum", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fs.Stats.ResetCounters()
err = fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
// We should have transferred no files
if fs.Stats.GetTransfers() != 0 {
t.Fatalf("Sync 2: want 0 transfers, got %d", fs.Stats.GetTransfers())
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)
cleanTempDir(t)
}
// Create a file and sync it. Change the last modified date and the
// file contents but not the size. If we're only doing sync by size
// only, we expect nothing to be transferred on the second sync.
func TestSyncSizeOnly(t *testing.T) {
cleanTempDir(t)
fs.Config.SizeOnly = true
defer func() { fs.Config.SizeOnly = false }()
WriteFile("sizeonly", "potato", t1)
local_items := []fstest.Item{
{Path: "sizeonly", Size: 6, ModTime: t1, Md5sum: "8ee2027983915ec78acc45027d874316"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fs.Stats.ResetCounters()
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Initial sync failed: %v", err)
}
// We should have transferred exactly one file.
if fs.Stats.GetTransfers() != 1 {
t.Fatalf("Sync 1: want 1 transfer, got %d", fs.Stats.GetTransfers())
}
remote_items := local_items
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)
// Update mtime, md5sum but not length of file
WriteFile("sizeonly", "POTATO", t2)
local_items = []fstest.Item{
{Path: "sizeonly", Size: 6, ModTime: t2, Md5sum: "8ac6f27a282e4938125482607ccfb55f"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fs.Stats.ResetCounters()
err = fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
// We should have transferred no files
if fs.Stats.GetTransfers() != 0 {
t.Fatalf("Sync 2: want 0 transfers, got %d", fs.Stats.GetTransfers())
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)
cleanTempDir(t)
}
func TestSyncAfterChangingModtimeOnly(t *testing.T) {
WriteFile("empty space", "", t1)
err := os.Chtimes(localName+"/empty space", t2, t2)
if err != nil {
t.Fatalf("Chtimes failed: %v", err)
}
err = fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestSyncAfterAddingAFile(t *testing.T) {
WriteFile("potato", "------------------------------------------------------------", t3)
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
WriteFile("potato", "smaller but same date", t3)
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Sync after changing a file's contents, modtime but not length
func TestSyncAfterChangingContentsOnly(t *testing.T) {
if fremote.Precision() == fs.ModTimeNotSupported {
t.Logf("ModTimeNotSupported so forcing file to be a different size")
WriteFile("potato", "different size to make sure it syncs", t2)
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
}
WriteFile("potato", "SMALLER BUT SAME DATE", t2)
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Sync after removing a file and adding a file --dry-run
func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
WriteFile("potato2", "------------------------------------------------------------", t1)
err := os.Remove(localName + "/potato")
if err != nil {
t.Fatalf("Remove failed: %v", err)
}
fs.Config.DryRun = true
err = fs.Sync(fremote, flocal)
fs.Config.DryRun = false
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
before := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, before, fs.Config.ModifyWindow)
}
// Sync after removing a file and adding a file
func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Test with exclude
func TestSyncWithExclude(t *testing.T) {
WriteFile("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
fs.Config.Filter.MaxSize = 80
defer func() {
fs.Config.Filter.MaxSize = 0
}()
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Test with exclude and delete excluded
func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
fs.Config.Filter.MaxSize = 40
fs.Config.Filter.DeleteExcluded = true
reset := func() {
fs.Config.Filter.MaxSize = 0
fs.Config.Filter.DeleteExcluded = false
}
defer reset()
err := fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
// Tidy up
reset()
err = os.Remove(localName + "/enormous")
if err != nil {
t.Fatalf("Remove failed: %v", err)
}
err = fs.Sync(fremote, flocal)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}
items = []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}
// Test a server side move if possible, or the backup path if not
func TestServerSideMove(t *testing.T) {
fremoteMove, finaliseMove, err := fstest.RandomRemote(*RemoteName, *SubDir)
if err != nil {
t.Fatalf("Failed to open remote move %q: %v", *RemoteName, err)
}
defer finaliseMove()
t.Logf("Server side move (if possible) %v -> %v", fremote, fremoteMove)
// Start with a copy
err = fs.CopyDir(fremoteMove, fremote)
if err != nil {
t.Fatalf("Server Side Copy failed: %v", err)
}
// Remove one file
obj := fremoteMove.NewFsObject("potato2")
if obj == nil {
t.Fatalf("Failed to find potato2")
}
err = obj.Remove()
if err != nil {
t.Fatalf("Failed to remove object: %v", err)
}
// Do server side move
err = fs.MoveDir(fremoteMove, fremote)
if err != nil {
t.Fatalf("Server Side Move failed: %v", err)
}
items := []fstest.Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
fstest.CheckListingWithPrecision(t, fremote, items[:0], fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremoteMove, items, fs.Config.ModifyWindow)
// Move it back again, dst does not exist this time
err = fs.MoveDir(fremote, fremoteMove)
if err != nil {
t.Fatalf("Server Side Move 2 failed: %v", err)
}
fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremoteMove, items[:0], fs.Config.ModifyWindow)
}
func TestLs(t *testing.T) {
var buf bytes.Buffer
err := fs.List(fremote, &buf)
if err != nil {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, " 0 empty space\n") {
t.Errorf("empty space missing: %q", res)
}
if !strings.Contains(res, " 60 potato2\n") {
t.Errorf("potato2 missing: %q", res)
}
}
func TestLsLong(t *testing.T) {
var buf bytes.Buffer
err := fs.ListLong(fremote, &buf)
if err != nil {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
lines := strings.Split(strings.Trim(res, "\n"), "\n")
if len(lines) != 2 {
t.Fatalf("Wrong number of lines in list: %q", lines)
}
timeFormat := "2006-01-02 15:04:05.000000000"
precision := fremote.Precision()
location := time.Now().Location()
checkTime := func(m, filename string, expected time.Time) {
modTime, err := time.ParseInLocation(timeFormat, m, location) // parse as localtime
if err != nil {
t.Errorf("Error parsing %q: %v", m, err)
} else {
dt, ok := fstest.CheckTimeEqualWithPrecision(expected, modTime, precision)
if !ok {
t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", filename, dt, precision, modTime, expected, precision)
}
}
}
m1 := regexp.MustCompile(`(?m)^ 0 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) empty space$`)
if ms := m1.FindStringSubmatch(res); ms == nil {
t.Errorf("empty space missing: %q", res)
} else {
checkTime(ms[1], "empty space", t2.Local())
}
m2 := regexp.MustCompile(`(?m)^ 60 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) potato2$`)
if ms := m2.FindStringSubmatch(res); ms == nil {
t.Errorf("potato2 missing: %q", res)
} else {
checkTime(ms[1], "potato2", t1.Local())
}
}
func TestMd5sum(t *testing.T) {
var buf bytes.Buffer
err := fs.Md5sum(fremote, &buf)
if err != nil {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") &&
!strings.Contains(res, " empty space\n") {
t.Errorf("empty space missing: %q", res)
}
if !strings.Contains(res, "6548b156ea68a4e003e786df99eee76 potato2\n") &&
!strings.Contains(res, " potato2\n") {
t.Errorf("potato2 missing: %q", res)
}
}
func TestCount(t *testing.T) {
objects, size, err := fs.Count(fremote)
if err != nil {
t.Fatalf("Count failed: %v", err)
}
if objects != 2 {
t.Errorf("want 2 objects got %d", objects)
}
if size != 60 {
t.Errorf("want size 60 got %d", size)
}
}
func TestCheck(t *testing.T) {
// FIXME
}
// Clean the temporary directory
func cleanTempDir(t *testing.T) {
t.Logf("Cleaning temporary directory: %q", localName)
err := os.RemoveAll(localName)
if err != nil {
t.Logf("Failed to remove %q: %v", localName, err)
}
}
func TestFinalise(t *testing.T) {
finalise()
cleanTempDir(t)
}

fs/test_all.sh Executable file
#!/bin/bash
go install
REMOTES="
TestSwift:
TestS3:
TestDrive:
TestGoogleCloudStorage:
TestDropbox:
TestAmazonCloudDrive:
TestOneDrive:
"
function test_remote {
args=$@
echo "@go test $args"
go test $args || {
echo "*** test $args FAILED ***"
exit 1
}
}
test_remote
test_remote --subdir
for remote in $REMOTES; do
test_remote --remote $remote
test_remote --remote $remote --subdir
done
echo "All OK"

fs/version.go Normal file
package fs
// Version of rclone
const Version = "v1.24"

fstest/fstest.go Normal file
// Package fstest provides utilities for testing the Fs
package fstest
// FIXME put name of test FS in Fs structure
import (
"io/ioutil"
"log"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/ncw/rclone/fs"
)
// Seed the random number generator
func init() {
rand.Seed(time.Now().UnixNano())
}
// Item represents an item for checking
type Item struct {
Path string
Md5sum string
ModTime time.Time
Size int64
WinPath string
}
// CheckTimeEqualWithPrecision checks the times are equal within the
// precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
dt := t0.Sub(t1)
if dt >= precision || dt <= -precision {
return dt, false
}
return dt, true
}
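// Illustrative behaviour (not part of this commit): two times compare equal
// when their difference is strictly less than the precision. For example:
//
//	want := fstest.Time("2001-02-03T04:05:06.499999999Z")
//	got := want.Add(500 * time.Millisecond)
//	dt, ok := fstest.CheckTimeEqualWithPrecision(got, want, time.Second)
//	// ok == true, dt == 500ms: inside a one second window
//	_, ok = fstest.CheckTimeEqualWithPrecision(got, want, 100*time.Millisecond)
//	// ok == false: 500ms exceeds a 100ms precision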
// CheckModTime checks the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
dt, ok := CheckTimeEqualWithPrecision(modTime, i.ModTime, precision)
if !ok {
t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision)
}
}
// Check checks all the attributes of the object are correct
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
if obj == nil {
t.Fatalf("Object is nil")
}
// Check attributes
Md5sum, err := obj.Md5sum()
if err != nil {
t.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
}
if !fs.Md5sumsEqual(i.Md5sum, Md5sum) {
t.Errorf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
}
if i.Size != obj.Size() {
t.Errorf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
}
i.CheckModTime(t, obj, obj.ModTime(), precision)
}
// Items represents all items for checking
type Items struct {
byName map[string]*Item
byNameAlt map[string]*Item
items []Item
}
// NewItems makes an Items
func NewItems(items []Item) *Items {
is := &Items{
byName: make(map[string]*Item),
byNameAlt: make(map[string]*Item),
items: items,
}
// Fill up byName
for i := range items {
is.byName[items[i].Path] = &items[i]
is.byNameAlt[items[i].WinPath] = &items[i]
}
return is
}
// Find checks off an item
func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
i, ok := is.byName[obj.Remote()]
if !ok {
i, ok = is.byNameAlt[obj.Remote()]
if !ok {
t.Errorf("Unexpected file %q", obj.Remote())
return
}
}
delete(is.byName, i.Path)
delete(is.byNameAlt, i.WinPath)
i.Check(t, obj, precision)
}
// Done checks all finished
func (is *Items) Done(t *testing.T) {
if len(is.byName) != 0 {
for name := range is.byName {
log.Printf("Not found %q", name)
}
t.Errorf("%d objects not found", len(is.byName))
}
}
// CheckListingWithPrecision checks the fs to see if it has the
// expected contents with the given precision.
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision time.Duration) {
is := NewItems(items)
oldErrors := fs.Stats.GetErrors()
var objs []fs.Object
for i := 1; i <= 5; i++ {
objs = nil
for obj := range f.List() {
objs = append(objs, obj)
}
if len(objs) == len(items) {
break
}
t.Logf("Sleeping for 1 second for list eventual consistency: %d/5", i)
time.Sleep(1 * time.Second)
}
for _, obj := range objs {
if obj == nil {
t.Errorf("Unexpected nil in List()")
continue
}
is.Find(t, obj, precision)
}
is.Done(t)
// Don't notice an error when listing an empty directory
if len(items) == 0 && oldErrors == 0 && fs.Stats.GetErrors() == 1 {
fs.Stats.ResetErrors()
}
}
// CheckListing checks the fs to see if it has the expected contents
func CheckListing(t *testing.T, f fs.Fs, items []Item) {
precision := f.Precision()
CheckListingWithPrecision(t, f, items, precision)
}
// Time parses a time string or logs a fatal error
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// RandomString creates a random string for test purposes
func RandomString(n int) string {
source := "abcdefghijklmnopqrstuvwxyz0123456789"
out := make([]byte, n)
for i := range out {
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// LocalRemote creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) {
path, err = ioutil.TempDir("", "rclone")
if err == nil {
// Now remove the directory
err = os.Remove(path)
}
path = filepath.ToSlash(path)
return
}
// RandomRemoteName makes a random bucket or subdirectory name
//
// Returns a random remote name plus the leaf name
func RandomRemoteName(remoteName string) (string, string, error) {
var err error
var leafName string
// Make a directory if remote name is empty
if remoteName == "" {
remoteName, err = LocalRemote()
if err != nil {
return "", "", err
}
} else {
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
leafName = RandomString(32)
remoteName += leafName
}
return remoteName, leafName, nil
}
// RandomRemote makes a random bucket or subdirectory on the remote
//
// Call the finalise function returned to Purge the fs at the end (and
// the parent if necessary)
func RandomRemote(remoteName string, subdir bool) (fs.Fs, func(), error) {
var err error
var parentRemote fs.Fs
remoteName, _, err = RandomRemoteName(remoteName)
if err != nil {
return nil, nil, err
}
if subdir {
parentRemote, err = fs.NewFs(remoteName)
if err != nil {
return nil, nil, err
}
remoteName += "/" + RandomString(8)
}
remote, err := fs.NewFs(remoteName)
if err != nil {
return nil, nil, err
}
finalise := func() {
_ = fs.Purge(remote) // ignore error
if parentRemote != nil {
err = fs.Purge(parentRemote) // log but otherwise ignore error
if err != nil {
log.Printf("Failed to purge %v: %v", parentRemote, err)
}
}
}
return remote, finalise, nil
}
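// Illustrative usage (not part of this commit): callers should defer the
// returned finalise function so the random remote (and its parent when
// subdir is set) is purged after the test — a minimal sketch:
//
//	remote, finalise, err := fstest.RandomRemote("TestSwift:", false)
//	if err != nil {
//		log.Fatalf("Failed to open remote: %v", err)
//	}
//	defer finalise()
//	// ... exercise remote ...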
// TestMkdir tests Mkdir works
func TestMkdir(t *testing.T, remote fs.Fs) {
err := fs.Mkdir(remote)
if err != nil {
t.Fatalf("Mkdir failed: %v", err)
}
CheckListing(t, remote, []Item{})
}
// TestPurge tests Purge works
func TestPurge(t *testing.T, remote fs.Fs) {
err := fs.Purge(remote)
if err != nil {
t.Fatalf("Purge failed: %v", err)
}
CheckListing(t, remote, []Item{})
}
// TestRmdir tests Rmdir works
func TestRmdir(t *testing.T, remote fs.Fs) {
err := fs.Rmdir(remote)
if err != nil {
t.Fatalf("Rmdir failed: %v", err)
}
}

fstest/fstests/fstests.go Normal file
// Package fstests provides generic tests for testing the Fs and Object interfaces
//
// Run go generate to write the tests for the remotes
package fstests
//go:generate go run gen_tests.go
import (
"bytes"
"crypto/md5"
"encoding/hex"
"flag"
"io"
"log"
"os"
"strings"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest"
)
var (
remote fs.Fs
// RemoteName should be set to the name of the remote for testing
RemoteName = ""
subRemoteName = ""
subRemoteLeaf = ""
// NilObject should be set to a nil Object from the Fs under test
NilObject fs.Object
file1 = fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: "file name.txt",
}
file2 = fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`,
WinPath: `hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _/z.txt`,
}
dumpHeaders = flag.Bool("dump-headers", false, "Dump HTTP headers - may contain sensitive info")
dumpBodies = flag.Bool("dump-bodies", false, "Dump HTTP headers and bodies - may contain sensitive info")
)
func init() {
flag.StringVar(&RemoteName, "remote", "", "Set this to override the default remote name (eg s3:)")
}
// TestInit tests basic initialisation
func TestInit(t *testing.T) {
var err error
fs.LoadConfig()
fs.Config.Verbose = false
fs.Config.Quiet = true
fs.Config.DumpHeaders = *dumpHeaders
fs.Config.DumpBodies = *dumpBodies
t.Logf("Using remote %q", RemoteName)
if RemoteName == "" {
RemoteName, err = fstest.LocalRemote()
if err != nil {
log.Fatalf("Failed to create tmp dir: %v", err)
}
}
subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(RemoteName)
if err != nil {
t.Fatalf("Couldn't make remote name: %v", err)
}
remote, err = fs.NewFs(subRemoteName)
if err == fs.ErrorNotFoundInConfigFile {
log.Printf("Didn't find %q in config file - skipping tests", RemoteName)
return
}
if err != nil {
t.Fatalf("Couldn't start FS: %v", err)
}
fstest.TestMkdir(t, remote)
}
func skipIfNotOk(t *testing.T) {
if remote == nil {
t.Skip("FS not configured")
}
}
// TestFsString tests the String method
func TestFsString(t *testing.T) {
skipIfNotOk(t)
str := remote.String()
if str == "" {
t.Fatal("Bad fs.String()")
}
}
// TestFsRmdirEmpty tests deleting an empty directory
func TestFsRmdirEmpty(t *testing.T) {
skipIfNotOk(t)
fstest.TestRmdir(t, remote)
}
// TestFsRmdirNotFound tests deleting a non existent directory
func TestFsRmdirNotFound(t *testing.T) {
skipIfNotOk(t)
err := remote.Rmdir()
if err == nil {
t.Fatalf("Expecting error on Rmdir non existent")
}
}
// TestFsMkdir tests making a directory
func TestFsMkdir(t *testing.T) {
skipIfNotOk(t)
fstest.TestMkdir(t, remote)
fstest.TestMkdir(t, remote)
}
// TestFsListEmpty tests listing an empty directory
func TestFsListEmpty(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{})
}
// TestFsListDirEmpty tests listing the directories from an empty directory
func TestFsListDirEmpty(t *testing.T) {
skipIfNotOk(t)
for obj := range remote.ListDir() {
t.Errorf("Found unexpected item %q", obj.Name)
}
}
// TestFsNewFsObjectNotFound tests not finding an object
func TestFsNewFsObjectNotFound(t *testing.T) {
skipIfNotOk(t)
if remote.NewFsObject("potato") != nil {
t.Fatal("Didn't expect to find object")
}
}
func findObject(t *testing.T, Name string) fs.Object {
obj := remote.NewFsObject(Name)
if obj == nil {
t.Fatalf("Object not found: %q", Name)
}
return obj
}
func testPut(t *testing.T, file *fstest.Item) {
buf := bytes.NewBufferString(fstest.RandomString(100))
hash := md5.New()
in := io.TeeReader(buf, hash)
file.Size = int64(buf.Len())
obj, err := remote.Put(in, file.Path, file.ModTime, file.Size)
if err != nil {
t.Fatal("Put error", err)
}
file.Md5sum = hex.EncodeToString(hash.Sum(nil))
file.Check(t, obj, remote.Precision())
// Re-read the object and check again
obj = findObject(t, file.Path)
file.Check(t, obj, remote.Precision())
}
// TestFsPutFile1 tests putting a file
func TestFsPutFile1(t *testing.T) {
skipIfNotOk(t)
testPut(t, &file1)
}
// TestFsPutFile2 tests putting a file into a subdirectory
func TestFsPutFile2(t *testing.T) {
skipIfNotOk(t)
testPut(t, &file2)
}
// TestFsListDirFile2 tests the files are correctly uploaded
func TestFsListDirFile2(t *testing.T) {
skipIfNotOk(t)
found := false
for obj := range remote.ListDir() {
if obj.Name != `hello? sausage` && obj.Name != `hello_ sausage` {
t.Errorf("Found unexpected item %q", obj.Name)
} else {
found = true
}
}
if !found {
t.Errorf("Didn't find %q", `hello? sausage`)
}
}
// TestFsListDirRoot tests that DirList works in the root
func TestFsListDirRoot(t *testing.T) {
skipIfNotOk(t)
rootRemote, err := fs.NewFs(RemoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", RemoteName, err)
}
found := false
for obj := range rootRemote.ListDir() {
if obj.Name == subRemoteLeaf {
found = true
}
}
if !found {
t.Errorf("Didn't find %q", subRemoteLeaf)
}
}
// TestFsListRoot tests List works in the root
func TestFsListRoot(t *testing.T) {
skipIfNotOk(t)
rootRemote, err := fs.NewFs(RemoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", RemoteName, err)
}
// Should either find file1 and file2 or nothing
found1 := false
f1 := subRemoteLeaf + "/" + file1.Path
found2 := false
f2 := subRemoteLeaf + "/" + file2.Path
f2Alt := subRemoteLeaf + "/" + file2.WinPath
count := 0
errors := fs.Stats.GetErrors()
for obj := range rootRemote.List() {
count++
if obj.Remote() == f1 {
found1 = true
}
if obj.Remote() == f2 || obj.Remote() == f2Alt {
found2 = true
}
}
errors -= fs.Stats.GetErrors()
if count == 0 {
if errors == 0 {
t.Error("Expecting error if count==0")
}
return
}
if found1 && found2 {
if errors != 0 {
t.Error("Not expecting error if found")
}
return
}
t.Errorf("Didn't find %q (%v) and %q (%v) or no files (count %d)", f1, found1, f2, found2, count)
}
// TestFsListFile1 tests file present
func TestFsListFile1(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}
// TestFsNewFsObject tests NewFsObject
func TestFsNewFsObject(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
file1.Check(t, obj, remote.Precision())
}
// TestFsListFile1and2 tests two files present
func TestFsListFile1and2(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}
// TestFsCopy tests Copy
func TestFsCopy(t *testing.T) {
skipIfNotOk(t)
// Check have Copy
_, ok := remote.(fs.Copier)
if !ok {
t.Skip("FS has no Copier interface")
}
var file1Copy = file1
file1Copy.Path += "-copy"
// do the copy
src := findObject(t, file1.Path)
dst, err := remote.(fs.Copier).Copy(src, file1Copy.Path)
if err != nil {
t.Fatalf("Copy failed: %v (%#v)", err, err)
}
// check file exists in new listing
fstest.CheckListing(t, remote, []fstest.Item{file1, file2, file1Copy})
// Check dst lightly - list above has checked ModTime/Md5sum
if dst.Remote() != file1Copy.Path {
t.Errorf("object path: want %q got %q", file1Copy.Path, dst.Remote())
}
// Delete copy
err = dst.Remove()
if err != nil {
t.Fatal("Remove copy error", err)
}
}
// TestFsMove tests Move
func TestFsMove(t *testing.T) {
skipIfNotOk(t)
// Check have Move
_, ok := remote.(fs.Mover)
if !ok {
t.Skip("FS has no Mover interface")
}
var file1Move = file1
file1Move.Path += "-move"
// do the move
src := findObject(t, file1.Path)
dst, err := remote.(fs.Mover).Move(src, file1Move.Path)
if err != nil {
t.Fatalf("Move failed: %v", err)
}
// check file exists in new listing
fstest.CheckListing(t, remote, []fstest.Item{file2, file1Move})
// Check dst lightly - list above has checked ModTime/Md5sum
if dst.Remote() != file1Move.Path {
t.Errorf("object path: want %q got %q", file1Move.Path, dst.Remote())
}
// move it back
src = findObject(t, file1Move.Path)
_, err = remote.(fs.Mover).Move(src, file1.Path)
if err != nil {
t.Errorf("Move failed: %v", err)
}
// check file exists in new listing
fstest.CheckListing(t, remote, []fstest.Item{file2, file1})
}
// Move src to this remote using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
// TestFsDirMove tests DirMove
func TestFsDirMove(t *testing.T) {
skipIfNotOk(t)
// Check have DirMove
_, ok := remote.(fs.DirMover)
if !ok {
t.Skip("FS has no DirMover interface")
}
// Check it can't move onto itself
err := remote.(fs.DirMover).DirMove(remote)
if err != fs.ErrorDirExists {
t.Errorf("Expecting fs.ErrorDirExists got: %v", err)
}
// new remote
newRemote, removeNewRemote, err := fstest.RandomRemote(RemoteName, false)
if err != nil {
t.Fatalf("Failed to create remote: %v", err)
}
defer removeNewRemote()
// try the move
err = newRemote.(fs.DirMover).DirMove(remote)
if err != nil {
t.Errorf("Failed to DirMove: %v", err)
}
// check remotes
// FIXME: Prints errors.
fstest.CheckListing(t, remote, []fstest.Item{})
fstest.CheckListing(t, newRemote, []fstest.Item{file2, file1})
// move it back
err = remote.(fs.DirMover).DirMove(newRemote)
if err != nil {
t.Errorf("Failed to DirMove: %v", err)
}
// check remotes
fstest.CheckListing(t, remote, []fstest.Item{file2, file1})
fstest.CheckListing(t, newRemote, []fstest.Item{})
}
// TestFsRmdirFull tests removing a non empty directory
func TestFsRmdirFull(t *testing.T) {
skipIfNotOk(t)
err := remote.Rmdir()
if err == nil {
t.Fatalf("Expecting error on RMdir on non empty remote")
}
}
// TestFsPrecision tests the Precision of the Fs
func TestFsPrecision(t *testing.T) {
skipIfNotOk(t)
precision := remote.Precision()
if precision == fs.ModTimeNotSupported {
return
}
if precision > time.Second || precision < 0 {
t.Fatalf("Precision out of range %v", precision)
}
// FIXME check expected precision
}
// TestObjectString tests the Object String method
func TestObjectString(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
s := obj.String()
if s != file1.Path {
t.Errorf("String() wrong %v != %v", s, file1.Path)
}
obj = NilObject
s = obj.String()
if s != "<nil>" {
t.Errorf("String() wrong %v != %v", s, "<nil>")
}
}
// TestObjectFs tests the object can be found
func TestObjectFs(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if obj.Fs() != remote {
t.Errorf("Fs is wrong %v != %v", obj.Fs(), remote)
}
}
// TestObjectRemote tests the Remote is correct
func TestObjectRemote(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if obj.Remote() != file1.Path {
t.Errorf("Remote is wrong %v != %v", obj.Remote(), file1.Path)
}
}
// TestObjectMd5sum tests the MD5SUM of the object is correct
func TestObjectMd5sum(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
Md5sum, err := obj.Md5sum()
if err != nil {
t.Errorf("Error in Md5sum: %v", err)
}
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}
// TestObjectModTime tests the ModTime of the object is correct
func TestObjectModTime(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
}
// TestObjectSetModTime tests that SetModTime works
func TestObjectSetModTime(t *testing.T) {
skipIfNotOk(t)
newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z")
obj := findObject(t, file1.Path)
obj.SetModTime(newModTime)
file1.ModTime = newModTime
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
// And make a new object and read it from there too
TestObjectModTime(t)
}
// TestObjectSize tests that Size works
func TestObjectSize(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if obj.Size() != file1.Size {
t.Errorf("Size is wrong %v != %v", obj.Size(), file1.Size)
}
}
// TestObjectOpen tests that Open works
func TestObjectOpen(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
in, err := obj.Open()
if err != nil {
t.Fatalf("Open() return error: %v", err)
}
hash := md5.New()
n, err := io.Copy(hash, in)
if err != nil {
t.Fatalf("io.Copy() return error: %v", err)
}
if n != file1.Size {
t.Fatalf("Read wrong number of bytes %d != %d", n, file1.Size)
}
err = in.Close()
if err != nil {
t.Fatalf("in.Close() return error: %v", err)
}
Md5sum := hex.EncodeToString(hash.Sum(nil))
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}
// TestObjectUpdate tests that Update works
func TestObjectUpdate(t *testing.T) {
skipIfNotOk(t)
buf := bytes.NewBufferString(fstest.RandomString(200))
hash := md5.New()
in := io.TeeReader(buf, hash)
file1.Size = int64(buf.Len())
obj := findObject(t, file1.Path)
err := obj.Update(in, file1.ModTime, file1.Size)
if err != nil {
t.Fatal("Update error", err)
}
file1.Md5sum = hex.EncodeToString(hash.Sum(nil))
file1.Check(t, obj, remote.Precision())
// Re-read the object and check again
obj = findObject(t, file1.Path)
file1.Check(t, obj, remote.Precision())
}
// TestObjectStorable tests that Storable works
func TestObjectStorable(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
if !obj.Storable() {
t.Fatalf("Expecting %v to be storable", obj)
}
}
// TestLimitedFs tests that a LimitedFs is created
func TestLimitedFs(t *testing.T) {
skipIfNotOk(t)
remoteName := subRemoteName + "/" + file2.Path
file2Copy := file2
file2Copy.Path = "z.txt"
fileRemote, err := fs.NewFs(remoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", remoteName, err)
}
fstest.CheckListing(t, fileRemote, []fstest.Item{file2Copy})
_, ok := fileRemote.(*fs.Limited)
if !ok {
t.Errorf("%v is not a fs.Limited", fileRemote)
}
}
// TestLimitedFsNotFound tests that a LimitedFs is not created if the object is not found
func TestLimitedFsNotFound(t *testing.T) {
skipIfNotOk(t)
remoteName := subRemoteName + "/not found.txt"
fileRemote, err := fs.NewFs(remoteName)
if err != nil {
t.Fatalf("Failed to make remote %q: %v", remoteName, err)
}
fstest.CheckListing(t, fileRemote, []fstest.Item{})
_, ok := fileRemote.(*fs.Limited)
if ok {
t.Errorf("%v is is a fs.Limited", fileRemote)
}
}
// TestObjectRemove tests Remove
func TestObjectRemove(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
err := obj.Remove()
if err != nil {
t.Fatal("Remove error", err)
}
fstest.CheckListing(t, remote, []fstest.Item{file2})
}
// TestObjectPurge tests Purge
func TestObjectPurge(t *testing.T) {
skipIfNotOk(t)
fstest.TestPurge(t, remote)
err := fs.Purge(remote)
if err == nil {
t.Fatal("Expecting error after on second purge")
}
}
// TestFinalise tidies up after the previous tests
func TestFinalise(t *testing.T) {
skipIfNotOk(t)
if strings.HasPrefix(RemoteName, "/") {
// Remove temp directory
err := os.Remove(RemoteName)
if err != nil {
log.Printf("Failed to remove %q: %v\n", RemoteName, err)
}
}
}

fstest/fstests/gen_tests.go Normal file
// +build ignore
// Make the test files from fstests.go
package main
import (
"bufio"
"html/template"
"log"
"os"
"os/exec"
"regexp"
"strings"
)
// Search fstests.go and return all the test function names
func findTestFunctions() []string {
fns := []string{}
matcher := regexp.MustCompile(`^func\s+(Test.*?)\(`)
in, err := os.Open("fstests.go")
if err != nil {
log.Fatalf("Couldn't open fstests.go: %v", err)
}
defer in.Close()
scanner := bufio.NewScanner(in)
for scanner.Scan() {
line := scanner.Text()
matches := matcher.FindStringSubmatch(line)
if len(matches) > 0 {
fns = append(fns, matches[1])
}
}
if err := scanner.Err(); err != nil {
log.Fatalf("Error scanning file: %v", err)
}
return fns
}
// Data to substitute
type Data struct {
Regenerate string
FsName string
UpperFsName string
TestName string
Fns []string
}
var testProgram = `
// Test {{ .UpperFsName }} filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: {{ .Regenerate }}
package {{ .FsName }}_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/{{ .FsName }}"
)
func init() {
fstests.NilObject = fs.Object((*{{ .FsName }}.Object)(nil))
fstests.RemoteName = "{{ .TestName }}"
}
// Generic tests for the Fs
{{ range $fn := .Fns }}func {{ $fn }}(t *testing.T){ fstests.{{ $fn }}(t) }
{{ end }}
`
// Generate test file piping it through gofmt
func generateTestProgram(t *template.Template, fns []string, Fsname string) {
fsname := strings.ToLower(Fsname)
TestName := "Test" + Fsname + ":"
outfile := "../../" + fsname + "/" + fsname + "_test.go"
if fsname == "local" {
TestName = ""
}
data := Data{
Regenerate: "make gen_tests",
FsName: fsname,
UpperFsName: Fsname,
TestName: TestName,
Fns: fns,
}
cmd := exec.Command("gofmt")
log.Printf("Writing %q", outfile)
out, err := os.Create(outfile)
if err != nil {
log.Fatal(err)
}
cmd.Stdout = out
gofmt, err := cmd.StdinPipe()
if err != nil {
log.Fatal(err)
}
if err = cmd.Start(); err != nil {
log.Fatal(err)
}
if err = t.Execute(gofmt, data); err != nil {
log.Fatal(err)
}
if err = gofmt.Close(); err != nil {
log.Fatal(err)
}
if err = cmd.Wait(); err != nil {
log.Fatal(err)
}
if err = out.Close(); err != nil {
log.Fatal(err)
}
}
func main() {
fns := findTestFunctions()
t := template.Must(template.New("main").Parse(testProgram))
generateTestProgram(t, fns, "Local")
generateTestProgram(t, fns, "Swift")
generateTestProgram(t, fns, "S3")
generateTestProgram(t, fns, "Drive")
generateTestProgram(t, fns, "GoogleCloudStorage")
generateTestProgram(t, fns, "Dropbox")
generateTestProgram(t, fns, "AmazonCloudDrive")
generateTestProgram(t, fns, "OneDrive")
log.Printf("Done")
}
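// For reference (not part of this commit): with the template above, running
// `make gen_tests` would write a wrapper file such as swift/swift_test.go
// looking roughly like this, one wrapper per Test function found in
// fstests.go:
//
//	// Test Swift filesystem interface
//	//
//	// Automatically generated - DO NOT EDIT
//	// Regenerate with: make gen_tests
//	package swift_test
//
//	import (
//		"testing"
//
//		"github.com/ncw/rclone/fs"
//		"github.com/ncw/rclone/fstest/fstests"
//		"github.com/ncw/rclone/swift"
//	)
//
//	func init() {
//		fstests.NilObject = fs.Object((*swift.Object)(nil))
//		fstests.RemoteName = "TestSwift:"
//	}
//
//	func TestInit(t *testing.T) { fstests.TestInit(t) }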

googlecloudstorage/googlecloudstorage.go Normal file
// Package googlecloudstorage provides an interface to Google Cloud Storage
package googlecloudstorage
/*
Notes
Can't set Updated but can set Metadata on object creation
Patch needs full_control not just read_write
FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error
- https://code.google.com/p/google-api-go-client/issues/detail?id=64
*/
import (
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"log"
"net/http"
"path"
"regexp"
"strings"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/storage/v1"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
)
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "8p/yms3OlNXE9OTDl/HLypf9gdiJ5cT3"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 256 // chunk size to read directory listings
)
var (
// Description of how to auth for this app
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageFullControlScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: fs.Reveal(rcloneClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "google cloud storage",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config(name, storageConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: oauthutil.ConfigClientID,
Help: "Google Application Client Id - leave blank normally.",
}, {
Name: oauthutil.ConfigClientSecret,
Help: "Google Application Client Secret - leave blank normally.",
}, {
Name: "project_number",
Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
}, {
Value: "bucketOwnerFullControl",
Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
}, {
Value: "bucketOwnerRead",
Help: "Object owner gets OWNER access, and project team owners get READER access.",
}, {
Value: "private",
Help: "Object owner gets OWNER access [default if left blank].",
}, {
Value: "projectPrivate",
Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Object owner gets OWNER access, and all Users get READER access.",
}},
}, {
Name: "bucket_acl",
Help: "Access Control List for new buckets.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
}, {
Value: "private",
Help: "Project team owners get OWNER access [default if left blank].",
}, {
Value: "projectPrivate",
Help: "Project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Project team owners get OWNER access, and all Users get READER access.",
}, {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
}},
}},
})
}
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
root string // the path we are working on if any
projectNumber string // used for finding buckets
objectAcl string // used when creating new objects
bucketAcl string // used when creating new buckets
}
// Object describes a storage object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
url string // download path
md5sum string // The MD5Sum of the object
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("Storage bucket %s", f.bucket)
}
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
}
// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = fmt.Errorf("Couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
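// Illustrative behaviour (not part of this commit): the regexp splits on the
// first "/" only, so for example:
//
//	bucket, dir, err := parsePath("mybucket/path/to/files")
//	// bucket == "mybucket", dir == "path/to/files", err == nil
//	bucket, dir, err = parsePath("mybucket")
//	// bucket == "mybucket", dir == "", err == nil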
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
oAuthClient, err := oauthutil.NewClient(name, storageConfig)
if err != nil {
log.Fatalf("Failed to configure Google Cloud Storage: %v", err)
}
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
bucket: bucket,
root: directory,
projectNumber: fs.ConfigFile.MustValue(name, "project_number"),
objectAcl: fs.ConfigFile.MustValue(name, "object_acl"),
bucketAcl: fs.ConfigFile.MustValue(name, "bucket_acl"),
}
if f.objectAcl == "" {
f.objectAcl = "private"
}
if f.bucketAcl == "" {
f.bucketAcl = "private"
}
// Create a new authorized Storage client.
f.client = oAuthClient
f.svc, err = storage.New(f.client)
if err != nil {
return nil, fmt.Errorf("Couldn't create Google Cloud Storage client: %s", err)
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
_, err = f.svc.Objects.Get(bucket, directory).Do()
if err == nil {
remote := path.Base(directory)
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
}
}
return f, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *Fs) newFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
o.setMetaData(info)
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
// logged already FsDebug("Failed to read info: %s", err)
return nil
}
}
return o
}
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *Fs) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *Fs) list(directories bool, fn func(string, *storage.Object)) {
list := f.svc.Objects.List(f.bucket).Prefix(f.root).MaxResults(listChunks)
if directories {
list = list.Delimiter("/")
}
rootLength := len(f.root)
for {
objects, err := list.Do()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
return
}
if !directories {
for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, f.root) {
fs.Log(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
fn(remote, object)
}
} else {
var object storage.Object
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
continue
}
fn(prefix[:len(prefix)-1], &object)
}
}
if objects.NextPageToken == "" {
break
}
list.PageToken(objects.NextPageToken)
}
}
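// A rough sketch of what the two modes deliver, assuming f.root is
// empty and the bucket holds "a/1.txt", "a/2.txt" and "b.txt"
// (hypothetical contents, not from the rclone source):
//
//	f.list(false, fn) // fn("a/1.txt", o), fn("a/2.txt", o), fn("b.txt", o)
//	f.list(true, fn)  // fn("a", &o) - only the "a/" prefix from the
//	                  // delimiter listing, with the trailing "/" removed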
// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
if f.bucket == "" {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
} else {
// List the objects
go func() {
defer close(out)
f.list(false, func(remote string, object *storage.Object) {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
}()
}
return out
}
// ListDir lists the buckets
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
if f.bucket == "" {
// List the buckets
go func() {
defer close(out)
if f.projectNumber == "" {
fs.Stats.Error()
fs.ErrorLog(f, "Can't list buckets without project number")
return
}
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
for {
buckets, err := listBuckets.Do()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list buckets: %v", err)
break
} else {
for _, bucket := range buckets.Items {
out <- &fs.Dir{
Name: bucket.Name,
Bytes: 0,
Count: 0,
}
}
}
if buckets.NextPageToken == "" {
break
}
listBuckets.PageToken(buckets.NextPageToken)
}
}()
} else {
// List the directories in the path in the bucket
go func() {
defer close(out)
f.list(true, func(remote string, object *storage.Object) {
out <- &fs.Dir{
Name: remote,
Bytes: int64(object.Size),
Count: 0,
}
})
}()
}
return out
}
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
return o, o.Update(in, modTime, size)
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir() error {
_, err := f.svc.Buckets.Get(f.bucket).Do()
if err == nil {
// Bucket already exists
return nil
}
if f.projectNumber == "" {
return fmt.Errorf("Can't make bucket without project number")
}
bucket := storage.Bucket{
Name: f.bucket,
}
_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketAcl).Do()
return err
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir() error {
if f.root != "" {
return nil
}
return f.svc.Buckets.Delete(f.bucket).Do()
}
// Precision returns the precision
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
srcBucket := srcObj.fs.bucket
srcObject := srcObj.fs.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
newObject, err := f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
if err != nil {
return nil, err
}
// Set the metadata for the new object while we have it
dstObj.setMetaData(newObject)
return dstObj, nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Md5sum returns the MD5 sum of an object as a lowercase hex string
func (o *Object) Md5sum() (string, error) {
return o.md5sum, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.bytes
}
// setMetaData sets the fs data from a storage.Object
func (o *Object) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)
// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
if err != nil {
fs.Log(o, "Bad MD5 decode: %v", err)
} else {
o.md5sum = hex.EncodeToString(md5sumData)
}
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(timeFormatIn, mtimeString)
if err == nil {
o.modTime = modTime
return
}
fs.Debug(o, "Failed to read mtime from metadata: %s", err)
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormatIn, info.Updated)
if err != nil {
fs.Log(o, "Bad time decode: %v", err)
} else {
o.modTime = modTime
}
}
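// Illustrative sketch of the precedence above (timeFormatIn is assumed
// to be an RFC3339-style layout; the values are made up):
//
//	info.Metadata[metaMtime] = "2015-11-07T16:23:12.123456789Z"
//	info.Updated = "2015-11-07T16:30:00.000Z"
//	// setMetaData uses the metadata mtime; only if that key is absent
//	// or unparseable does it fall back to info.Updated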
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
object, err := o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
}
o.setMetaData(object)
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present
// falls back to the LastModified returned in the HTTP headers
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
// fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
return o.modTime
}
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormatOut)
return metadata
}
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(modTime time.Time) {
// This only adds metadata so it will preserve other metadata
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
}
o.setMetaData(newObject)
}
// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open() (in io.ReadCloser, err error) {
// This is slightly complicated by Go insisting on decoding the
// %2F in URLs into /, which is legal in HTTP, but unfortunately
// not what the storage server wants.
//
// So first encode all the % into their encoded form - the URL
// parser will decode them, giving back our original escaped string
url := strings.Replace(o.url, "%", "%25", -1)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
// SetOpaque sets Opaque such that HTTP requests to it don't
// alter any hex-escaped characters
googleapi.SetOpaque(req.URL)
req.Header.Set("User-Agent", fs.UserAgent)
res, err := o.fs.client.Do(req)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
_ = res.Body.Close() // ignore error
return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
}
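// A minimal sketch of the escaping dance above (hypothetical URL, not
// from the rclone source). An object named "a/b" arrives with its
// MediaLink escaped as "a%2Fb"; without the re-escape Go would fold
// that back into "a/b" on the wire:
//
//	url := strings.Replace("https://host/o/a%2Fb", "%", "%25", -1)
//	// url == "https://host/o/a%252Fb"
//	req, _ := http.NewRequest("GET", url, nil)
//	googleapi.SetOpaque(req.URL)
//	// the request now goes out for /o/a%2Fb, as the server expects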
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
ContentType: fs.MimeType(o),
Size: uint64(size),
Updated: modTime.Format(timeFormatOut), // Doesn't get set
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in).Name(object.Name).PredefinedAcl(o.fs.objectAcl).Do()
if err != nil {
return err
}
// Set the metadata for the new object while we have it
o.setMetaData(newObject)
return nil
}
// Remove an object
func (o *Object) Remove() error {
return o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
)


@@ -0,0 +1,56 @@
// Test GoogleCloudStorage filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package googlecloudstorage_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/googlecloudstorage"
)
func init() {
fstests.NilObject = fs.Object((*googlecloudstorage.Object)(nil))
fstests.RemoteName = "TestGoogleCloudStorage:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

BIN graphics/rclone-256x256.png (new binary file, 51 KiB, not shown)
BIN graphics/rclone-50x50.png (new binary file, 5.7 KiB, not shown)
BIN graphics/rclone-64x64.png (new binary file, 7.7 KiB, not shown)

@@ -1,56 +1,69 @@
// Local filesystem interface
// Package local provides a filesystem interface
package local
import (
"crypto/md5"
"encoding/hex"
"fmt"
"hash"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/ncw/rclone/fs"
)
// Register with Fs
func init() {
fs.Register(&fs.FsInfo{
fs.Register(&fs.Info{
Name: "local",
NewFs: NewFs,
})
}
// FsLocal represents a local filesystem rooted at root
type FsLocal struct {
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warned map[string]struct{} // whether we have warned about this string
}
// FsObjectLocal represents a local filesystem object
type FsObjectLocal struct {
local fs.Fs // The Fs this object is part of
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path
path string // The local path
info os.FileInfo // Interface for file info
info os.FileInfo // Interface for file info (always present)
md5sum string // the md5sum of the object or "" if not calculated
}
// ------------------------------------------------------------
// NewFs contstructs an FsLocal from the path
// NewFs constructs an Fs from the path
func NewFs(name, root string) (fs.Fs, error) {
root = path.Clean(root)
f := &FsLocal{root: root}
var err error
f := &Fs{
name: name,
warned: make(map[string]struct{}),
}
f.root = filterPath(f.cleanUtf8(root))
// Check to see if this points to a file
fi, err := os.Lstat(f.root)
if err == nil && fi.Mode().IsRegular() {
// It is a file, so use the parent as the root
remote := path.Base(root)
f.root = path.Dir(root)
var remote string
f.root, remote = getDirFile(f.root)
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
@@ -58,58 +71,78 @@ func NewFs(name, root string) (fs.Fs, error) {
return f, nil
}
// String converts this FsLocal to a string
func (f *FsLocal) String() string {
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Local file system at %s", f.root)
}
// newFsObject makes a half completed Object
func (f *Fs) newFsObject(remote string) *Object {
remote = filepath.ToSlash(remote)
dstPath := filterPath(filepath.Join(f.root, f.cleanUtf8(remote)))
return &Object{
fs: f,
remote: remote,
path: dstPath,
}
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsLocal) NewFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
path := filepath.Join(f.root, remote)
o := &FsObjectLocal{local: f, remote: remote, path: path}
func (f *Fs) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
o := f.newFsObject(remote)
if info != nil {
o.info = info
} else {
err := o.lstat()
if err != nil {
fs.Debug(o, "Failed to stat %s: %s", path, err)
fs.Debug(o, "Failed to stat %s: %s", o.path, err)
return nil
}
}
return o
}
// Return an FsObject from a path
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsLocal) NewFsObject(remote string) fs.Object {
return f.NewFsObjectWithInfo(remote, nil)
func (f *Fs) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}
// List the path returning a channel of FsObjects
//
// Ignores everything which isn't Storable, eg links etc
func (f *FsLocal) List() fs.ObjectsChan {
func (f *Fs) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
log.Printf("Failed to open directory: %s: %s", path, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
} else {
remote, err := filepath.Rel(f.root, path)
if err != nil {
fs.Stats.Error()
log.Printf("Failed to get relative path %s: %s", path, err)
fs.ErrorLog(f, "Failed to get relative path %s: %s", path, err)
return nil
}
if remote == "." {
return nil
// remote = ""
}
if fs := f.NewFsObjectWithInfo(remote, fi); fs != nil {
if fs := f.newFsObjectWithInfo(remote, fi); fs != nil {
if fs.Storable() {
out <- fs
}
@@ -119,46 +152,63 @@ func (f *FsLocal) List() fs.ObjectsChan {
})
if err != nil {
fs.Stats.Error()
log.Printf("Failed to open directory: %s: %s", f.root, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", f.root, err)
}
close(out)
}()
return out
}
// Walk the path returning a channel of FsObjects
func (f *FsLocal) ListDir() fs.DirChan {
// cleanUtf8 makes a string into a valid UTF-8 string
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
func (f *Fs) cleanUtf8(name string) string {
if !utf8.ValidString(name) {
if _, ok := f.warned[name]; !ok {
fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
name = string([]rune(name))
}
if runtime.GOOS == "windows" {
name = cleanWindowsName(f, name)
}
return name
}
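// A small illustration of the conversion trick above: round-tripping
// through []rune replaces each invalid byte with utf8.RuneError
// (U+FFFD), so on a non-Windows platform (values made up):
//
//	f.cleanUtf8("ok")     // "ok" - unchanged
//	f.cleanUtf8("a\x80b") // "a\uFFFDb" - the bad byte is replaced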
// ListDir walks the path returning a channel of FsObjects
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
items, err := ioutil.ReadDir(f.root)
if err != nil {
fs.Stats.Error()
log.Printf("Couldn't find read directory: %s", err)
fs.ErrorLog(f, "Couldn't find read directory: %s", err)
} else {
for _, item := range items {
if item.IsDir() {
dir := &fs.Dir{
Name: item.Name(),
Name: f.cleanUtf8(item.Name()),
When: item.ModTime(),
Bytes: 0,
Count: 0,
}
// Go down the tree to count the files and directories
dirpath := path.Join(f.root, item.Name())
dirpath := filterPath(filepath.Join(f.root, item.Name()))
err := filepath.Walk(dirpath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
log.Printf("Failed to open directory: %s: %s", path, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
} else {
dir.Count += 1
dir.Count++
dir.Bytes += fi.Size()
}
return nil
})
if err != nil {
fs.Stats.Error()
log.Printf("Failed to open directory: %s: %s", dirpath, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", dirpath, err)
}
out <- dir
}
@@ -169,28 +219,32 @@ func (f *FsLocal) ListDir() fs.DirChan {
return out
}
// Puts the FsObject to the local filesystem
func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
dstPath := filepath.Join(f.root, remote)
// Temporary FsObject under construction
fs := &FsObjectLocal{local: f, remote: remote, path: dstPath}
return fs, fs.Update(in, modTime, size)
// Put the FsObject to the local filesystem
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction - info filled in by Update()
o := f.newFsObject(remote)
err := o.Update(in, modTime, size)
if err != nil {
return nil, err
}
return o, nil
}
// Mkdir creates the directory if it doesn't exist
func (f *FsLocal) Mkdir() error {
return os.MkdirAll(f.root, 0770)
func (f *Fs) Mkdir() error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
return os.MkdirAll(f.root, 0777)
}
// Rmdir removes the directory
//
// If it isn't empty it will return an error
func (f *FsLocal) Rmdir() error {
func (f *Fs) Rmdir() error {
return os.Remove(f.root)
}
// Return the precision
func (f *FsLocal) Precision() (precision time.Duration) {
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
f.precisionOk.Do(func() {
f.precision = f.readPrecision()
})
@@ -198,7 +252,7 @@ func (f *FsLocal) Precision() (precision time.Duration) {
}
// Read the precision
func (f *FsLocal) readPrecision() (precision time.Duration) {
func (f *Fs) readPrecision() (precision time.Duration) {
// Default precision of 1s
precision = time.Second
@@ -211,12 +265,15 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
}
path := fd.Name()
// fmt.Println("Created temp file", path)
fd.Close()
err = fd.Close()
if err != nil {
return time.Second
}
// Delete it on return
defer func() {
// fmt.Println("Remove temp file")
os.Remove(path)
_ = os.Remove(path) // ignore error
}()
// Find the minimum duration we can detect
@@ -246,86 +303,246 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
return
}
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
fi, err := os.Lstat(f.root)
if err != nil {
return err
}
if !fi.Mode().IsDir() {
return fmt.Errorf("Can't Purge non directory: %q", f.root)
}
return os.RemoveAll(f.root)
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Temporary FsObject under construction
dstObj := f.newFsObject(remote)
// Check it is a file if it exists
err := dstObj.lstat()
if os.IsNotExist(err) {
// OK
} else if err != nil {
return nil, err
} else if !dstObj.info.Mode().IsRegular() {
// It isn't a file
return nil, fmt.Errorf("Can't move file onto non-file")
}
// Create destination
err = dstObj.mkdirAll()
if err != nil {
return nil, err
}
// Do the move
err = os.Rename(srcObj.path, dstObj.path)
if err != nil {
return nil, err
}
// Update the info
err = dstObj.lstat()
if err != nil {
return nil, err
}
return dstObj, nil
}
// DirMove moves src directory to this remote using server side move
// operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debug(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
// Check if source exists
sstat, err := os.Lstat(srcFs.root)
if err != nil {
return err
}
// And is a directory
if !sstat.IsDir() {
return fs.ErrorCantDirMove
}
// Check if destination exists
_, err = os.Lstat(f.root)
if !os.IsNotExist(err) {
return fs.ErrorDirExists
}
// Do the move
return os.Rename(srcFs.root, f.root)
}
// ------------------------------------------------------------
// Return the parent Fs
func (o *FsObjectLocal) Fs() fs.Fs {
return o.local
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
return o.fs
}
// Return a string version
func (o *FsObjectLocal) String() string {
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Return the remote path
func (o *FsObjectLocal) Remote() string {
return o.remote
// Remote returns the remote path
func (o *Object) Remote() string {
return o.fs.cleanUtf8(o.remote)
}
// Md5sum calculates the Md5sum of a file returning a lowercase hex string
func (o *FsObjectLocal) Md5sum() (string, error) {
func (o *Object) Md5sum() (string, error) {
if o.md5sum != "" {
return o.md5sum, nil
}
in, err := os.Open(o.path)
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to open: %s", err)
fs.ErrorLog(o, "Failed to open: %s", err)
return "", err
}
defer in.Close() // FIXME ignoring error
hash := md5.New()
_, err = io.Copy(hash, in)
closeErr := in.Close()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read: %s", err)
fs.ErrorLog(o, "Failed to read: %s", err)
return "", err
}
return fmt.Sprintf("%x", hash.Sum(nil)), nil
if closeErr != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to close: %s", closeErr)
return "", closeErr
}
o.md5sum = hex.EncodeToString(hash.Sum(nil))
return o.md5sum, nil
}
// Size returns the size of an object in bytes
func (o *FsObjectLocal) Size() int64 {
func (o *Object) Size() int64 {
return o.info.Size()
}
// ModTime returns the modification time of the object
func (o *FsObjectLocal) ModTime() time.Time {
func (o *Object) ModTime() time.Time {
return o.info.ModTime()
}
// Sets the modification time of the local fs object
func (o *FsObjectLocal) SetModTime(modTime time.Time) {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
fs.Debug(o, "Failed to set mtime on file: %s", err)
return
}
// Re-read metadata
err = o.lstat()
if err != nil {
fs.Debug(o, "Failed to stat: %s", err)
return
}
}
// Is this object storable
func (o *FsObjectLocal) Storable() bool {
// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
mode := o.info.Mode()
if mode&(os.ModeSymlink|os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
fs.Debug(o, "Can't transfer non file/directory")
return false
} else if mode&os.ModeDir != 0 {
fs.Debug(o, "FIXME Skipping directory")
// fs.Debug(o, "Skipping directory")
return false
}
return true
}
// Open an object for read
func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
in, err = os.Open(o.path)
// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
// object that is read
type localOpenFile struct {
o *Object // object that is open
in io.ReadCloser // handle we are wrapping
hash hash.Hash // currently accumulating MD5
}
// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
n, err = file.in.Read(p)
if n > 0 {
// Hash routines never return an error
_, _ = file.hash.Write(p[:n])
}
return
}
// Close the object and update the md5sum
func (file *localOpenFile) Close() (err error) {
err = file.in.Close()
if err == nil {
file.o.md5sum = hex.EncodeToString(file.hash.Sum(nil))
} else {
file.o.md5sum = ""
}
return err
}
// Open an object for read
func (o *Object) Open() (in io.ReadCloser, err error) {
in, err = os.Open(o.path)
if err != nil {
return
}
// Update the md5sum as we go along
in = &localOpenFile{
o: o,
in: in,
hash: md5.New(),
}
return
}
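// Sketch of the effect (hypothetical caller, not from the rclone
// source): after a full read and Close, the object's cached md5sum
// matches the bytes actually read, so a later Md5sum() call needs no
// second pass over the file:
//
//	in, _ := o.Open()
//	_, _ = io.Copy(ioutil.Discard, in)
//	_ = in.Close()
//	sum, _ := o.Md5sum() // served from o.md5sum, no re-read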
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
dir, _ := getDirFile(o.path)
return os.MkdirAll(dir, 0777)
}
// Update the object from in with modTime and size
func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) error {
dir := path.Dir(o.path)
err := os.MkdirAll(dir, 0770)
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
err := o.mkdirAll()
if err != nil {
return err
}
@@ -335,6 +552,10 @@ func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) erro
return err
}
// Calculate the md5sum of the object we are reading as we go along
hash := md5.New()
in = io.TeeReader(in, hash)
_, err = io.Copy(out, in)
outErr := out.Close()
if err != nil {
@@ -344,23 +565,128 @@ func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) erro
return outErr
}
// All successful so update the md5sum
o.md5sum = hex.EncodeToString(hash.Sum(nil))
// Set the mtime
o.SetModTime(modTime)
return nil
// ReRead info now that we have finished
return o.lstat()
}
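// The io.TeeReader used above is the standard way to hash a stream
// while copying it; a self-contained sketch (illustrative only):
//
//	hash := md5.New()
//	var buf bytes.Buffer
//	_, _ = io.Copy(&buf, io.TeeReader(strings.NewReader("hello"), hash))
//	fmt.Printf("%x\n", hash.Sum(nil)) // 5d41402abc4b2a76b9719d911017c592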
// Stat a FsObject into info
func (o *FsObjectLocal) lstat() error {
func (o *Object) lstat() error {
info, err := os.Lstat(o.path)
o.info = info
return err
}
// Remove an object
func (o *FsObjectLocal) Remove() error {
func (o *Object) Remove() error {
return os.Remove(o.path)
}
// Return the current directory and file from a path
// Assumes os.PathSeparator is used.
func getDirFile(s string) (string, string) {
i := strings.LastIndex(s, string(os.PathSeparator))
return s[:i], s[i+1:]
}
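// For example, assuming '/' is os.PathSeparator:
//
//	dir, file := getDirFile("/a/b/c.txt") // dir == "/a/b", file == "c.txt"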
func filterPath(s string) string {
s = filepath.Clean(s)
if runtime.GOOS == "windows" {
s = strings.Replace(s, `/`, `\`, -1)
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
// Convert to UNC
return uncPath(s)
}
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
return s
}
// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(s string) string {
// UNC can NOT use "/", so convert all to "\"
s = strings.Replace(s, `/`, `\`, -1)
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(s, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(s, `\\?\`) {
return s
}
// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
}
if isAbsWinDrive.MatchString(s) {
return `\\?\` + s
}
return s
}
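// Examples, taken from the table-driven tests further down:
//
//	uncPath(`C:\Windows\Folder`)      // `\\?\C:\Windows\Folder`
//	uncPath(`\\server\share\Desktop`) // `\\?\UNC\server\share\Desktop`
//	uncPath(`\\?\C:\Windows\Folder`)  // unchanged - already a long path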
// cleanWindowsName will clean invalid Windows characters
func cleanWindowsName(f *Fs, name string) string {
original := name
var name2 string
if strings.HasPrefix(name, `\\?\`) {
name2 = `\\?\`
name = strings.TrimPrefix(name, `\\?\`)
}
if strings.HasPrefix(name, `//?/`) {
name2 = `//?/`
name = strings.TrimPrefix(name, `//?/`)
}
// Colon is allowed as part of a drive name X:\
colonAt := strings.Index(name, ":")
if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
// Copy to name2, which is unfiltered
name2 += name[0 : colonAt+1]
name = name[colonAt+1:]
}
name2 += strings.Map(func(r rune) rune {
switch r {
case '<', '>', '"', '|', '?', '*', ':':
return '_'
}
return r
}, name)
if name2 != original && f != nil {
if _, ok := f.warned[name]; !ok {
fs.Debug(f, "Replacing invalid characters in %q to %q", name, name2)
f.warned[name] = struct{}{}
}
}
return name2
}
// Check the interfaces are satisfied
var _ fs.Fs = &FsLocal{}
var _ fs.Object = &FsObjectLocal{}
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Object = &Object{}
)

local/local_test.go Normal file (56 lines)

@@ -0,0 +1,56 @@
// Test Local filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package local_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/local"
)
func init() {
fstests.NilObject = fs.Object((*local.Object)(nil))
fstests.RemoteName = ""
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

local/tests_test.go Normal file (91 lines)

@@ -0,0 +1,91 @@
package local
import (
"testing"
)
var uncTestPaths = []string{
"C:\\Ba*d\\P|a?t<h>\\Windows\\Folder",
"C:/Ba*d/P|a?t<h>/Windows\\Folder",
"C:\\Windows\\Folder",
"\\\\?\\C:\\Windows\\Folder",
"//?/C:/Windows/Folder",
"\\\\?\\UNC\\server\\share\\Desktop",
"\\\\?\\unC\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\AbsoluteToRoot\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop",
"\\\\?\\UNC\\\\share\\folder\\Desktop",
"\\\\server\\share",
}
var uncTestPathsResults = []string{
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\?\UNC\server\share`,
}
// Test that UNC paths are converted.
func TestUncPaths(t *testing.T) {
for i, p := range uncTestPaths {
unc := uncPath(p)
if unc != uncTestPathsResults[i] {
t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
}
// Test we don't add more.
unc = uncPath(unc)
if unc != uncTestPathsResults[i] {
t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
}
}
}
var utf8Tests = [][2]string{
[2]string{"ABC", "ABC"},
[2]string{string([]byte{0x80}), "�"},
[2]string{string([]byte{'a', 0x80, 'b'}), "a�b"},
}
func TestCleanUtf8(t *testing.T) {
f := &Fs{}
f.warned = make(map[string]struct{})
for _, test := range utf8Tests {
got := f.cleanUtf8(test[0])
expect := test[1]
if got != expect {
t.Fatalf("got %q, expected %q", got, expect)
}
}
}
// Test Windows character replacements
var testsWindows = [][2]string{
[2]string{`c:\temp`, `c:\temp`},
[2]string{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
[2]string{`//?/UNC/theserver/dir\file.txt`, `//?/UNC/theserver/dir\file.txt`},
[2]string{"c:/temp", "c:/temp"},
[2]string{"/temp/file.txt", "/temp/file.txt"},
[2]string{`!\"#¤%&/()=;:*^?+-`, "!\\_#¤%&/()=;__^_+-"},
[2]string{`<>"|?*:&\<>"|?*:&\<>"|?*:&`, "_______&\\_______&\\_______&"},
}
func TestCleanWindows(t *testing.T) {
for _, test := range testsWindows {
got := cleanWindowsName(nil, test[0])
expect := test[1]
if got != expect {
t.Fatalf("got %q, expected %q", got, expect)
}
}
}

make_manual.py Executable file (84 lines)

@@ -0,0 +1,84 @@
#!/usr/bin/python
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
"""
import os
import re
from datetime import datetime
docpath = "docs/content"
outfile = "MANUAL.md"
# Order to add docs segments to make outfile
docs = [
"about.md",
"install.md",
"docs.md",
"filtering.md",
"overview.md",
"drive.md",
"s3.md",
"swift.md",
"dropbox.md",
"googlecloudstorage.md",
"amazonclouddrive.md",
"onedrive.md",
"local.md",
"changelog.md",
"bugs.md",
"faq.md",
"licence.md",
"authors.md",
"contact.md",
]
# Docs which aren't made into outfile
ignore_docs = [
"downloads.md",
"privacy.md",
"donate.md",
]
def read_doc(doc):
"""Read file as a string"""
path = os.path.join(docpath, doc)
with open(path) as fd:
contents = fd.read()
parts = contents.split("---\n", 2)
if len(parts) != 3:
raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
contents = parts[2].strip()+"\n\n"
# Remove icons
contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
# Make [...](/links/) absolute
contents = re.sub(r'\((\/.*?\/)\)', r"(http://rclone.org\1)", contents)
return contents
def check_docs(docpath):
"""Check all the docs are in docpath"""
files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
files -= set(ignore_docs)
docs_set = set(docs)
if files == docs_set:
return
print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
raise ValueError("Missing files")
def main():
check_docs(docpath)
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s
""" % datetime.now().strftime("%b %d, %Y"))
for doc in docs:
out.write(read_doc(doc))
print "Written '%s'" % outfile
if __name__ == "__main__":
main()


@@ -1,3 +1,26 @@
Perhaps make Md5sum() and Modtime() optional. Define the zero values
"" and 0. Make it so we can support remotes which can't do those.
Fix the docs
* factor the README.md into the docs directory
* create it as part of make by assembling other parts
* write long docs about each flag
Change lsd command so it doesn't show -1
* Make sure all Fses show -1 for objects Zero for dates etc
* Make test?
Put the TestRemote names into the Fs description
Make test_all.sh use the TestRemote name automatically
Run errcheck and go vet in the make file
.. Also race detector?
.. go tool vet -shadow
Get rid of Storable?
Write developer manual
Todo
* FIXME: More -dry-run checks for object transfer
* Might be quicker to check md5sums first? for swift <-> swift certainly, and maybe for small files
@@ -5,16 +28,23 @@ Todo
* if object.PseudoDirectory {
* fmt.Printf("%9s %19s %s\n", "Directory", "-", fs.Remote())
* Make Account wrapper
* limit bandwidth for a pool of all individual connections
* do timeouts by setting a limit, seeing whether io has happened
and resetting it if it has
* make Account do progress meter
* Make logging controllable with flags (mostly done)
* -timeout: Make all timeouts be settable with command line parameters
* Windows paths? Do we need to translate / and \?
* Make a fs.Errorf and count errors and log them at a different level
* Add max object size to fs metadata - 5GB for swift, infinite for local, ? for s3
* tie into -max-size flag
* FIXME Make NewFs to return err.IsAnObject so can put the LimitedFs
creation in common code? Or try for as much as possible?
* FIXME Account all the transactions (ls etc) using a different
Roundtripper wrapper which wraps the transactions?
More rsync features
* include
* exclude
* max size
* -c, --checksum skip based on checksum, not mod-time & size
Ideas for flags
* --retries N flag which would make rclone retry a sync until successful or it tried N times.
Ideas
* could do encryption - put IV into metadata?
@@ -23,42 +53,20 @@ Ideas
* support
* sftp
* scp
* Google cloud storage: https://developers.google.com/storage/
* rsync over ssh
* dropbox: https://github.com/nickoneill/go-dropbox (no MD5s)
* control times sync (which is slow) with -a --archive flag?
* control times sync (which is slow with some remotes) with -a --archive flag?
* Copy a glob pattern - could do with LimitedFs
Need to make directory objects otherwise can't upload an empty directory
* Or could upload empty directories only?
* Can't purge a local filesystem because it leaves the directories behind
Make an encryption layer.
Copying a single file? Or maybe with a glob pattern? Could do with LimitedFs
This would layer over the source FS to
* decrypt all gets
* encrypt all puts
* encrypt file names in list
* decrypt them in list
s3
* Can maybe set last modified?
* https://forums.aws.amazon.com/message.jspa?messageID=214062
* Otherwise can set metadata
* Returns etag and last modified in bucket list
Would like to be able to see unencrypted file names in remote though? How? Or is that two encryption layers..?
Bugs
* Non verbose - not sure number transferred got counted up? CHECK
* When doing copy it recurses the whole of the destination FS which isn't necessary
Making a release
* go build ./...
* cd rclonetest
* go build
* ./rclonetest memstore:
* ./rclonetest s3:
* ./rclonetest drive2:
* ./rclonetest /tmp/z
* cd ..
* make tag
* edit README.md Changelog
* git commit version.go rclonetest/version.go README.md docs/content/downloads.md
* make retag
* . ~/bin/go-cross
* make cross
* make upload
* make upload_website
* git push --tags

oauthutil/oauthutil.go Normal file (328 lines)

@@ -0,0 +1,328 @@
package oauthutil
import (
"crypto/rand"
"encoding/json"
"fmt"
"log"
"net"
"net/http"
"time"
"github.com/ncw/rclone/fs"
"github.com/skratchdot/open-golang/open"
"golang.org/x/net/context"
"golang.org/x/oauth2"
)
const (
// ConfigToken is the key used to store the token under
ConfigToken = "token"
// ConfigClientID is the config key used to store the client id
ConfigClientID = "client_id"
// ConfigClientSecret is the config key used to store the client secret
ConfigClientSecret = "client_secret"
// TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization
// code should be returned in the title bar of the browser, with the page text
// prompting the user to copy the code and paste it in the application.
TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob"
// bindPort is the port that we bind the local webserver to
bindPort = "53682"
// bindAddress is binding for local webserver when active
bindAddress = "127.0.0.1:" + bindPort
// RedirectURL is redirect to local webserver when active
RedirectURL = "http://" + bindAddress + "/"
// RedirectPublicURL is redirect to local webserver when active with public name
RedirectPublicURL = "http://localhost.rclone.org:" + bindPort + "/"
)
// oldToken contains an end-user's tokens.
// This is the data you must store to persist authentication.
//
// From the original code.google.com/p/goauth2/oauth package - used
// for backwards compatibility in the rclone config file
type oldToken struct {
AccessToken string
RefreshToken string
Expiry time.Time
}
// getToken returns the token saved in the config file under
// section name.
func getToken(name string) (*oauth2.Token, error) {
tokenString, err := fs.ConfigFile.GetValue(string(name), ConfigToken)
if err != nil {
return nil, err
}
if tokenString == "" {
return nil, fmt.Errorf("Empty token found - please run rclone config again")
}
token := new(oauth2.Token)
err = json.Unmarshal([]byte(tokenString), token)
if err != nil {
return nil, err
}
// if has data then return it
if token.AccessToken != "" && token.RefreshToken != "" {
return token, nil
}
// otherwise try parsing as oldToken
oldtoken := new(oldToken)
err = json.Unmarshal([]byte(tokenString), oldtoken)
if err != nil {
return nil, err
}
// Fill in result into new token
token.AccessToken = oldtoken.AccessToken
token.RefreshToken = oldtoken.RefreshToken
token.Expiry = oldtoken.Expiry
// Save new format in config file
err = putToken(name, token)
if err != nil {
return nil, err
}
return token, nil
}
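// Roughly, the two on-disk JSON shapes getToken accepts (field values
// are made up; the second is what golang.org/x/oauth2 marshals):
//
//	// oldToken, from code.google.com/p/goauth2/oauth:
//	{"AccessToken":"xyz","RefreshToken":"abc","Expiry":"2015-11-07T16:23:12Z"}
//	// oauth2.Token:
//	{"access_token":"xyz","token_type":"Bearer","refresh_token":"abc","expiry":"2015-11-07T16:23:12Z"}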
// putToken stores the token in the config file
//
// This saves the config file if it changes
func putToken(name string, token *oauth2.Token) error {
tokenBytes, err := json.Marshal(token)
if err != nil {
return err
}
tokenString := string(tokenBytes)
old := fs.ConfigFile.MustValue(name, ConfigToken)
if tokenString != old {
fs.ConfigFile.SetValue(name, ConfigToken, tokenString)
fs.SaveConfig()
fs.Debug(name, "Saving new token in config file")
}
return nil
}
// tokenSource stores updated tokens in the config file
type tokenSource struct {
Name string
TokenSource oauth2.TokenSource
OldToken oauth2.Token
}
// Token returns a token or an error.
// Token must be safe for concurrent use by multiple goroutines.
// The returned Token must not be modified.
//
// This saves the token in the config file if it has changed
func (ts *tokenSource) Token() (*oauth2.Token, error) {
token, err := ts.TokenSource.Token()
if err != nil {
return nil, err
}
if *token != ts.OldToken {
err = putToken(ts.Name, token)
if err != nil {
return nil, err
}
}
return token, nil
}
// Check interface satisfied
var _ oauth2.TokenSource = (*tokenSource)(nil)
// Context returns a context with our HTTP Client baked in for oauth2
func Context() context.Context {
return context.WithValue(nil, oauth2.HTTPClient, fs.Config.Client())
}
// overrideCredentials sets the ClientID and ClientSecret from the
// config file if they are not blank
func overrideCredentials(name string, config *oauth2.Config) {
ClientID := fs.ConfigFile.MustValue(name, ConfigClientID)
if ClientID != "" {
config.ClientID = ClientID
}
ClientSecret := fs.ConfigFile.MustValue(name, ConfigClientSecret)
if ClientSecret != "" {
config.ClientSecret = ClientSecret
}
}
// NewClient gets a token from the config file and configures a Client
// with it
func NewClient(name string, config *oauth2.Config) (*http.Client, error) {
overrideCredentials(name, config)
token, err := getToken(name)
if err != nil {
return nil, err
}
// Set our own http client in the context
ctx := Context()
// Wrap the TokenSource in our TokenSource which saves changed
// tokens in the config file
ts := &tokenSource{
Name: name,
OldToken: *token,
TokenSource: config.TokenSource(ctx, token),
}
return oauth2.NewClient(ctx, ts), nil
}
// Config does the initial creation of the token
//
// It may run an internal webserver to receive the results
func Config(name string, config *oauth2.Config) error {
overrideCredentials(name, config)
// See if already have a token
tokenString := fs.ConfigFile.MustValue(name, "token")
if tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !fs.Confirm() {
return nil
}
}
// Detect whether we should use internal web server
useWebServer := false
switch config.RedirectURL {
case RedirectURL, RedirectPublicURL:
useWebServer = true
case TitleBarRedirectURL:
fmt.Printf("Use auto config?\n")
fmt.Printf(" * Say Y if not sure\n")
fmt.Printf(" * Say N if you are working on a remote or headless machine or Y didn't work\n")
useWebServer = fs.Confirm()
if useWebServer {
// copy the config and set to use the internal webserver
configCopy := *config
config = &configCopy
config.RedirectURL = RedirectURL
}
}
// Make random state
stateBytes := make([]byte, 16)
_, err := rand.Read(stateBytes)
if err != nil {
return err
}
state := fmt.Sprintf("%x", stateBytes)
authURL := config.AuthCodeURL(state)
// Prepare webserver
server := authServer{
state: state,
bindAddress: bindAddress,
authURL: authURL,
}
if useWebServer {
server.code = make(chan string, 1)
go server.Start()
defer server.Stop()
authURL = "http://" + bindAddress + "/auth"
}
// Generate a URL for the user to visit for authorization.
_ = open.Start(authURL)
fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
fmt.Printf("Log in and authorize rclone for access\n")
var authCode string
if useWebServer {
// Read the code, and exchange it for a token.
fmt.Printf("Waiting for code...\n")
authCode = <-server.code
if authCode != "" {
fmt.Printf("Got code\n")
} else {
return fmt.Errorf("Failed to get code")
}
} else {
// Read the code, and exchange it for a token.
fmt.Printf("Enter verification code> ")
authCode = fs.ReadLine()
}
token, err := config.Exchange(oauth2.NoContext, authCode)
if err != nil {
return fmt.Errorf("Failed to get token: %v", err)
}
return putToken(name, token)
}
// Local web server for collecting auth
type authServer struct {
state string
listener net.Listener
bindAddress string
code chan string
authURL string
}
// Start runs an internal web server to receive the authorization code
func (s *authServer) Start() {
fs.Debug(nil, "Starting auth server on %s", s.bindAddress)
mux := http.NewServeMux()
server := &http.Server{
Addr: s.bindAddress,
Handler: mux,
}
mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, req *http.Request) {
http.Error(w, "", 404)
return
})
mux.HandleFunc("/auth", func(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, s.authURL, 307)
return
})
mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
fs.Debug(nil, "Received request on auth server")
code := req.FormValue("code")
if code != "" {
state := req.FormValue("state")
if state != s.state {
fs.Debug(nil, "State did not match: want %q got %q", s.state, state)
fmt.Fprintf(w, "<h1>Failure</h1>\n<p>Auth state doesn't match</p>")
} else {
fs.Debug(nil, "Successfully got code")
if s.code != nil {
fmt.Fprintf(w, "<h1>Success</h1>\n<p>Go back to rclone to continue</p>")
s.code <- code
} else {
fmt.Fprintf(w, "<h1>Success</h1>\n<p>Cut and paste this code into rclone: <code>%s</code></p>", code)
}
}
return
}
fs.Debug(nil, "No code found on request")
fmt.Fprintf(w, "<h1>Failed!</h1>\nNo code found.")
http.Error(w, "", 500)
})
var err error
s.listener, err = net.Listen("tcp", s.bindAddress)
if err != nil {
log.Fatalf("Failed to start auth webserver: %v", err)
}
err = server.Serve(s.listener)
fs.Debug(nil, "Closed auth server with error: %v", err)
}
func (s *authServer) Stop() {
fs.Debug(nil, "Closing auth server")
if s.code != nil {
close(s.code)
s.code = nil
}
_ = s.listener.Close()
}
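As a point of reference for how the helpers above fit together, here is a minimal sketch of a backend registering itself with oauthutil. The "example" name, endpoint URLs and scopes are invented; the real wiring is in onedrive.go below.

package example

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/oauthutil"
	"golang.org/x/oauth2"
)

// exampleOAuthConfig is a hypothetical oauth2 endpoint for illustration
var exampleOAuthConfig = &oauth2.Config{
	ClientID:     "client-id",
	ClientSecret: "client-secret",
	Scopes:       []string{"example.readwrite"},
	Endpoint: oauth2.Endpoint{
		AuthURL:  "https://auth.example.com/authorize",
		TokenURL: "https://auth.example.com/token",
	},
	// This RedirectURL selects the internal webserver flow in Config
	RedirectURL: oauthutil.RedirectURL,
}

func init() {
	fs.Register(&fs.Info{
		Name: "example",
		Config: func(name string) {
			// Runs the browser flow above and stores the token via putToken
			if err := oauthutil.Config(name, exampleOAuthConfig); err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		NewFs: func(name, root string) (fs.Fs, error) {
			// NewClient wraps the stored token in a tokenSource so that
			// refreshed tokens get written back to the config file
			client, err := oauthutil.NewClient(name, exampleOAuthConfig)
			if err != nil {
				return nil, err
			}
			_ = client // use the client to talk to the remote
			return nil, fmt.Errorf("example: not implemented")
		},
	})
}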

onedrive/api/api.go (new file)
@@ -0,0 +1,141 @@
// Package api implements the API for one drive
package api
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"github.com/ncw/rclone/fs"
)
const (
rootURL = "https://api.onedrive.com/v1.0" // root URL for requests
)
// Client contains the info to sustain the API
type Client struct {
c *http.Client
}
// NewClient takes an oauth http.Client and makes a new api instance
func NewClient(c *http.Client) *Client {
return &Client{
c: c,
}
}
// Opts contains parameters for Call, CallJSON etc
type Opts struct {
Method string
Path string
Absolute bool // Path is absolute
Body io.Reader
NoResponse bool // set if no response body is expected - the body will be closed
ContentType string
ContentLength *int64
ContentRange string
ExtraHeaders map[string]string
}
// checkClose is a utility function used to check the return from
// Close in a defer statement.
func checkClose(c io.Closer, err *error) {
cerr := c.Close()
if *err == nil {
*err = cerr
}
}
// DecodeJSON decodes resp.Body into result
func DecodeJSON(resp *http.Response, result interface{}) (err error) {
defer checkClose(resp.Body, &err)
decoder := json.NewDecoder(resp.Body)
return decoder.Decode(result)
}
// Call makes the call and returns the http.Response
//
// if err != nil then resp.Body will need to be closed
//
// it will return resp if at all possible, even if err is set
func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
if opts == nil {
return nil, fmt.Errorf("call() called with nil opts")
}
var url string
if opts.Absolute {
url = opts.Path
} else {
url = rootURL + opts.Path
}
req, err := http.NewRequest(opts.Method, url, opts.Body)
if err != nil {
return
}
if opts.ContentType != "" {
req.Header.Add("Content-Type", opts.ContentType)
}
if opts.ContentLength != nil {
req.ContentLength = *opts.ContentLength
}
if opts.ContentRange != "" {
req.Header.Add("Content-Range", opts.ContentRange)
}
if opts.ExtraHeaders != nil {
for k, v := range opts.ExtraHeaders {
req.Header.Add(k, v)
}
}
req.Header.Add("User-Agent", fs.UserAgent)
resp, err = api.c.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
// Decode error response
errResponse := new(Error)
err = DecodeJSON(resp, &errResponse)
if err != nil {
return resp, err
}
if errResponse.ErrorInfo.Code == "" {
errResponse.ErrorInfo.Code = resp.Status
}
return resp, errResponse
}
if opts.NoResponse {
return resp, resp.Body.Close()
}
return resp, nil
}
// CallJSON runs Call and decodes the body as a JSON object into response (if not nil)
//
// If request is not nil then it will be JSON encoded as the body of the request
//
// It will return resp if at all possible, even if err is set
func (api *Client) CallJSON(opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
// Set the body up as a JSON object if required
if opts.Body == nil && request != nil {
body, err := json.Marshal(request)
if err != nil {
return nil, err
}
var newOpts = *opts
newOpts.Body = bytes.NewBuffer(body)
newOpts.ContentType = "application/json"
opts = &newOpts
}
resp, err = api.Call(opts)
if err != nil {
return resp, err
}
if opts.NoResponse {
return resp, nil
}
err = DecodeJSON(resp, response)
return resp, err
}
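To see how Client, Opts and CallJSON are meant to be driven, here is a hedged sketch that fetches the default drive and decodes it into the api.Drive type from types.go. The /drive path and the use of http.DefaultClient (rather than a client from oauthutil.NewClient, which real calls need for authentication) are illustrative assumptions.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/ncw/rclone/onedrive/api"
)

func main() {
	// In real use the *http.Client carries the OAuth token
	srv := api.NewClient(http.DefaultClient)

	// GET {rootURL}/drive; non-2xx statuses come back as *api.Error
	var drive api.Drive
	opts := api.Opts{
		Method: "GET",
		Path:   "/drive",
	}
	if _, err := srv.CallJSON(&opts, nil, &drive); err != nil {
		log.Fatalf("Failed to read drive: %v", err)
	}
	fmt.Printf("drive %s: %d of %d bytes used\n", drive.ID, drive.Quota.Used, drive.Quota.Total)
}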

onedrive/api/types.go (new file)
@@ -0,0 +1,213 @@
// Types passed and returned to and from the API
package api
import "time"
const (
timeFormat = `"` + time.RFC3339 + `"`
)
// Error is returned from one drive when things go wrong
type Error struct {
ErrorInfo struct {
Code string `json:"code"`
Message string `json:"message"`
InnerError struct {
Code string `json:"code"`
} `json:"innererror"`
} `json:"error"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := e.ErrorInfo.Code
if e.ErrorInfo.InnerError.Code != "" {
out += ": " + e.ErrorInfo.InnerError.Code
}
out += ": " + e.ErrorInfo.Message
return out
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// Identity represents an identity of an actor. For example, an actor
// can be a user, device, or application.
type Identity struct {
DisplayName string `json:"displayName"`
ID string `json:"id"`
}
// IdentitySet is a keyed collection of Identity objects. It is used
// to represent a set of identities associated with various events for
// an item, such as created by or last modified by.
type IdentitySet struct {
User Identity `json:"user"`
Application Identity `json:"application"`
Device Identity `json:"device"`
}
// Quota groups storage space quota-related information on OneDrive into a single structure.
type Quota struct {
Total int `json:"total"`
Used int `json:"used"`
Remaining int `json:"remaining"`
Deleted int `json:"deleted"`
State string `json:"state"` // normal | nearing | critical | exceeded
}
// Drive is a representation of a drive resource
type Drive struct {
ID string `json:"id"`
DriveType string `json:"driveType"`
Owner IdentitySet `json:"owner"`
Quota Quota `json:"quota"`
}
// Timestamp represents date and time information for the OneDrive
// API. It uses ISO 8601 format and is always in UTC time.
type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
out = (*time.Time)(t).UTC().AppendFormat(out, timeFormat)
return out, nil
}
// UnmarshalJSON turns JSON into a Timestamp
func (t *Timestamp) UnmarshalJSON(data []byte) error {
newT, err := time.Parse(timeFormat, string(data))
if err != nil {
return err
}
*t = Timestamp(newT)
return nil
}
// ItemReference groups data needed to reference a OneDrive item
// across the service into a single structure.
type ItemReference struct {
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
ID string `json:"id"` // Unique identifier for the item. Read/Write.
Path string `json:"path"` // Path that can be used to navigate to the item. Read/Write.
}
// FolderFacet groups folder-related data on OneDrive into a single structure
type FolderFacet struct {
ChildCount int64 `json:"childCount"` // Number of children contained immediately within this container.
}
// HashesType groups different types of hashes into a single structure, for an item on OneDrive.
type HashesType struct {
Sha1Hash string `json:"sha1Hash"` // base64 encoded SHA1 hash for the contents of the file (if available)
Crc32Hash string `json:"crc32Hash"` // base64 encoded CRC32 value of the file (if available)
}
// FileFacet groups file-related data on OneDrive into a single structure.
type FileFacet struct {
MimeType string `json:"mimeType"` // The MIME type for the file. This is determined by logic on the server and might not be the value provided when the file was uploaded.
Hashes HashesType `json:"hashes"` // Hashes of the file's binary content, if available.
}
// FileSystemInfoFacet contains properties that are reported by the
// device's local file system for the local version of an item. This
// facet can be used to specify the last modified date or created date
// of the item as it was on the local device.
type FileSystemInfoFacet struct {
CreatedDateTime Timestamp `json:"createdDateTime"` // The UTC date and time the file was created on a client.
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // The UTC date and time the file was last modified on a client.
}
// DeletedFacet indicates that the item on OneDrive has been
// deleted. In this version of the API, the presence (non-null) of the
// facet value indicates that the file was deleted. A null (or
// missing) value indicates that the file is not deleted.
type DeletedFacet struct {
}
// Item represents metadata for an item in OneDrive
type Item struct {
ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only.
Name string `json:"name"` // The name of the item (filename and extension). Read-write.
ETag string `json:"eTag"` // eTag for the entire item (metadata + content). Read-only.
CTag string `json:"cTag"` // An eTag for the content of the item. This eTag is not changed if only the metadata is changed. Read-only.
CreatedBy IdentitySet `json:"createdBy"` // Identity of the user, device, and application which created the item. Read-only.
LastModifiedBy IdentitySet `json:"lastModifiedBy"` // Identity of the user, device, and application which last modified the item. Read-only.
CreatedDateTime Timestamp `json:"createdDateTime"` // Date and time of item creation. Read-only.
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
Size int64 `json:"size"` // Size of the item in bytes. Read-only.
ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write.
WebURL string `json:"webUrl"` // URL that displays the resource in the browser. Read-only.
Description string `json:"description"` // Provide a user-visible description of the item. Read-write.
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
// Image *ImageFacet `json:"image"` // Image metadata, if the item is an image. Read-only.
// Photo *PhotoFacet `json:"photo"` // Photo metadata, if the item is a photo. Read-only.
// Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only.
// Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only.
// Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only.
Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
}
// ViewDeltaResponse is the response to the view delta method
type ViewDeltaResponse struct {
Value []Item `json:"value"` // An array of Item objects which have been created, modified, or deleted.
NextLink string `json:"@odata.nextLink"` // A URL to retrieve the next available page of changes.
DeltaLink string `json:"@odata.deltaLink"` // A URL returned instead of @odata.nextLink after all current changes have been returned. Used to read the next set of changes in the future.
DeltaToken string `json:"@delta.token"` // A token value that can be used in the query string on manually-crafted calls to view.delta. Not needed if you're using nextLink and deltaLink.
}
// ListChildrenResponse is the response to the list children method
type ListChildrenResponse struct {
Value []Item `json:"value"` // An array of Item objects
NextLink string `json:"@odata.nextLink"` // A URL to retrieve the next available page of items.
}
// CreateItemRequest is the request to create an item object
type CreateItemRequest struct {
Name string `json:"name"` // Name of the folder to be created.
Folder FolderFacet `json:"folder"` // Empty Folder facet to indicate that folder is the type of resource to be created.
ConflictBehavior string `json:"@name.conflictBehavior"` // Determines what to do if an item with a matching name already exists in this folder. Accepted values are: rename, replace, and fail (the default).
}
// SetFileSystemInfo is used to Update an object's FileSystemInfo.
type SetFileSystemInfo struct {
FileSystemInfo FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
}
// CreateUploadResponse is the response from creating an upload session
type CreateUploadResponse struct {
UploadURL string `json:"uploadUrl"` // "https://sn3302.up.1drv.com/up/fe6987415ace7X4e1eF866337",
ExpirationDateTime Timestamp `json:"expirationDateTime"` // "2015-01-29T09:21:55.523Z",
NextExpectedRanges []string `json:"nextExpectedRanges"` // ["0-"]
}
// UploadFragmentResponse is the response from uploading a fragment
type UploadFragmentResponse struct {
ExpirationDateTime Timestamp `json:"expirationDateTime"` // "2015-01-29T09:21:55.523Z",
NextExpectedRanges []string `json:"nextExpectedRanges"` // ["0-"]
}
// CopyItemRequest is the request to copy an item object
//
// Note: The parentReference should include either an id or path but
// not both. If both are included, they need to reference the same
// item or an error will occur.
type CopyItemRequest struct {
ParentReference ItemReference `json:"parentReference"` // Reference to the parent item the copy will be created in.
Name *string `json:"name"` // Optional The new name for the copy. If this isn't provided, the same name will be used as the original.
}
// AsyncOperationStatus provides information on the progress of an asynchronous job.
//
// The following API calls return AsyncOperationStatus resources:
//
// Copy Item
// Upload From URL
type AsyncOperationStatus struct {
Operation string `json:"operation"` // The type of job being run.
PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
}
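The custom JSON encoding on Timestamp is easy to misuse because the methods are defined on the pointer receiver, so a value must be marshalled through its address. A quick round-trip sketch:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/ncw/rclone/onedrive/api"
)

func main() {
	ts := api.Timestamp(time.Date(2015, 1, 29, 9, 21, 55, 0, time.UTC))

	// Marshal via the pointer so MarshalJSON is used; this produces a
	// quoted RFC3339 string in UTC
	data, err := json.Marshal(&ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // "2015-01-29T09:21:55Z"

	// UnmarshalJSON parses the quoted RFC3339 string back
	var ts2 api.Timestamp
	if err := json.Unmarshal(data, &ts2); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(ts2).Equal(time.Time(ts))) // true
}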

onedrive/onedrive.go (new file)
@@ -0,0 +1,947 @@
// Package onedrive provides an interface to the Microsoft One Drive
// object storage system.
package onedrive
import (
"bytes"
"fmt"
"io"
"log"
"net/http"
"regexp"
"strings"
"sync"
"time"
"github.com/ncw/rclone/dircache"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/onedrive/api"
"github.com/ncw/rclone/pacer"
"github.com/spf13/pflag"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "0000000044165769"
rcloneClientSecret = "0+be4+jYw+7018HY6P3t/Izo+pTc+Yvt8+fy8NHU094="
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"wl.signin", // Allow single sign-on capabilities
"wl.offline_access", // Allow receiving a refresh token
"onedrive.readwrite", // r/w perms to all of a user's OneDrive files
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://login.live.com/oauth20_authorize.srf",
TokenURL: "https://login.live.com/oauth20_token.srf",
},
ClientID: rcloneClientID,
ClientSecret: fs.Reveal(rcloneClientSecret),
RedirectURL: oauthutil.RedirectPublicURL,
}
chunkSize = fs.SizeSuffix(10 * 1024 * 1024)
uploadCutoff = fs.SizeSuffix(10 * 1024 * 1024)
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "onedrive",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config(name, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: oauthutil.ConfigClientID,
Help: "Microsoft App Client Id - leave blank normally.",
}, {
Name: oauthutil.ConfigClientSecret,
Help: "Microsoft App Client Secret - leave blank normally.",
}},
})
pflag.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
pflag.VarP(&uploadCutoff, "onedrive-upload-cutoff", "", "Cutoff for switching to chunked upload - must be <= 100MB")
}
// Fs represents a remote one drive
type Fs struct {
name string // name of this remote
srv *api.Client // the connection to the one drive server
root string // the path we are working on
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // pacer for API calls
}
// Object describes a one drive object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
hasMetaData bool // whether info below has been set
size int64 // size of the object
modTime time.Time // modification time of the object
id string // ID of the object
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("One drive root '%s'", f.root)
}
// Pattern to match a one drive path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a one drive 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
opts := api.Opts{
Method: "GET",
Path: "/drive/root:/" + replaceReservedChars(path),
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
return info, resp, err
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
root = parsePath(root)
oAuthClient, err := oauthutil.NewClient(name, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure One Drive: %v", err)
}
f := &Fs{
name: name,
root: root,
srv: api.NewClient(oAuthClient),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
// Get rootID
rootInfo, _, err := f.readMetaDataForPath("")
if err != nil || rootInfo.ID == "" {
return nil, fmt.Errorf("Failed to get root: %v", err)
}
f.dirCache = dircache.New(root, rootInfo.ID, f)
// Find the current root
err = f.dirCache.FindRoot(false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
newF := *f
newF.dirCache = dircache.New(newRoot, rootInfo.ID, &newF)
newF.root = newRoot
// Make new Fs which is the parent
err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
obj := newF.newObjectWithInfo(remote, nil)
if obj == nil {
// File doesn't exist so return old f
return f, nil
}
// return a Fs Limited to this object
return fs.NewLimited(&newF, obj), nil
}
return f, nil
}
// rootSlash returns root with a slash on if it is empty, otherwise empty string
func (f *Fs) rootSlash() string {
if f.root == "" {
return f.root
}
return f.root + "/"
}
// Return an Object from a path
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote string, info *api.Item) fs.Object {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
// Set info
o.setMetaData(info)
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
// logged already FsDebug("Failed to read info: %s", err)
return nil
}
}
return o
}
// NewFsObject returns an Object from a path
//
// May return nil if an error occurred
func (f *Fs) NewFsObject(remote string) fs.Object {
return f.newObjectWithInfo(remote, nil)
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
// fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
parent, ok := f.dirCache.GetInv(pathID)
if !ok {
return "", false, fmt.Errorf("Couldn't find parent ID")
}
path := leaf
if parent != "" {
path = parent + "/" + path
}
if f.dirCache.FoundRoot() {
path = f.rootSlash() + path
}
info, resp, err := f.readMetaDataForPath(path)
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
return "", false, nil
}
return "", false, err
}
if info.Folder == nil {
return "", false, fmt.Errorf("Found file when looking for folder")
}
return info.ID, true, nil
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
// fs.Debug(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
var info *api.Item
opts := api.Opts{
Method: "POST",
Path: "/drive/items/" + pathID + "/children",
}
mkdir := api.CreateItemRequest{
Name: replaceReservedChars(leaf),
ConflictBehavior: "fail",
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
return "", err
}
//fmt.Printf("...Id %q\n", *info.Id)
return info.ID, nil
}
// listAllFn is a user function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*api.Item) bool
// Lists the directory required, calling the user function on each item found
//
// If directoriesOnly is set it only sends directories, if filesOnly
// is set it only sends files.
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := api.Opts{
Method: "GET",
Path: "/drive/items/" + dirID + "/children?top=1000",
}
OUTER:
for {
var result api.ListChildrenResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list files: %v", err)
break
}
if len(result.Value) == 0 {
break
}
for i := range result.Value {
item := &result.Value[i]
isFolder := item.Folder != nil
if isFolder {
if filesOnly {
continue
}
} else {
if directoriesOnly {
continue
}
}
if item.Deleted != nil {
continue
}
item.Name = restoreReservedChars(item.Name)
if fn(item) {
found = true
break OUTER
}
}
if result.NextLink == "" {
break
}
opts.Path = result.NextLink
opts.Absolute = true
}
return
}
// Path should be a directory path, either "" or "path/"
//
// List the directory using a recursive list from the root
//
// This fetches the minimum amount of stuff but does more API calls
// which makes it slow
func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
var subError error
// Make the API request
var wg sync.WaitGroup
_, err := f.listAll(dirID, false, false, func(info *api.Item) bool {
// Recurse on directories
if info.Folder != nil {
wg.Add(1)
folder := path + info.Name + "/"
fs.Debug(f, "Reading %s", folder)
go func() {
defer wg.Done()
err := f.listDirRecursive(info.ID, folder, out)
if err != nil {
subError = err
fs.ErrorLog(f, "Error reading %s:%s", folder, err)
}
}()
} else {
if fs := f.newObjectWithInfo(path+info.Name, info); fs != nil {
out <- fs
}
}
return false
})
wg.Wait()
fs.Debug(f, "Finished reading %s", path)
if err != nil {
return err
}
if subError != nil {
return subError
}
return nil
}
// List walks the path returning a channel of Objects
func (f *Fs) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
defer close(out)
err := f.dirCache.FindRoot(false)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
err = f.listDirRecursive(f.dirCache.RootID(), "", out)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "List failed: %s", err)
}
}
}()
return out
}
// ListDir lists the directories
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
err := f.dirCache.FindRoot(false)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
_, err := f.listAll(f.dirCache.RootID(), true, false, func(item *api.Item) bool {
dir := &fs.Dir{
Name: item.Name,
Bytes: -1,
Count: -1,
When: time.Time(item.LastModifiedDateTime),
}
if item.Folder != nil {
dir.Count = item.Folder.ChildCount
}
out <- dir
return false
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "ListDir failed: %s", err)
}
}
}()
return out
}
// createObject creates a half finished Object from the parameters
// passed in, which must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(remote, true)
if err != nil {
return nil, leaf, directoryID, err
}
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
}
return o, leaf, directoryID, nil
}
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
o, _, _, err := f.createObject(remote, modTime, size)
if err != nil {
return nil, err
}
return o, o.Update(in, modTime, size)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir() error {
return f.dirCache.FindRoot(true)
}
// deleteObject removes an object by ID
func (f *Fs) deleteObject(id string) error {
opts := api.Opts{
Method: "DELETE",
Path: "/drive/items/" + id,
NoResponse: true,
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(check bool) error {
if f.root == "" {
return fmt.Errorf("Can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(false)
if err != nil {
return err
}
rootID := dc.RootID()
item, _, err := f.readMetaDataForPath(f.root)
if err != nil {
return err
}
if item.Folder == nil {
return fmt.Errorf("Not a folder")
}
if check && item.Folder.ChildCount != 0 {
return fmt.Errorf("Folder not empty")
}
err = f.deleteObject(rootID)
if err != nil {
return err
}
f.dirCache.ResetRoot()
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir() error {
return f.purgeCheck(true)
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// waitForJob polls the status URL in location until the asynchronous job completes
func (f *Fs) waitForJob(location string, o *Object) error {
deadline := time.Now().Add(fs.Config.Timeout)
for time.Now().Before(deadline) {
opts := api.Opts{
Method: "GET",
Path: location,
Absolute: true,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
return err
}
if resp.StatusCode == 202 {
var status api.AsyncOperationStatus
err = api.DecodeJSON(resp, &status)
if err != nil {
return err
}
if status.Status == "failed" || status.Status == "deleteFailed" {
return fmt.Errorf("Async operation %q returned %q", status.Operation, status.Status)
}
} else {
var info api.Item
err = api.DecodeJSON(resp, &info)
if err != nil {
return err
}
o.setMetaData(&info)
return nil
}
time.Sleep(1 * time.Second)
}
return fmt.Errorf("Async operation didn't complete after %v", fs.Config.Timeout)
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err := srcObj.readMetaData()
if err != nil {
return nil, err
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil {
return nil, err
}
// Copy the object
opts := api.Opts{
Method: "POST",
Path: "/drive/items/" + srcObj.id + "/action.copy",
ExtraHeaders: map[string]string{"Prefer": "respond-async"},
NoResponse: true,
}
replacedLeaf := replaceReservedChars(leaf)
copy := api.CopyItemRequest{
Name: &replacedLeaf,
ParentReference: api.ItemReference{
ID: directoryID,
},
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &copy, nil)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
// read location header
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Didn't receive location header in copy response")
}
// Wait for job to finish
err = f.waitForJob(location, dstObj)
if err != nil {
return nil, err
}
return dstObj, nil
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
return f.purgeCheck(false)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// srvPath returns a path for use in server
func (o *Object) srvPath() string {
return replaceReservedChars(o.fs.rootSlash() + o.remote)
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
return "", nil // not supported by one drive
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return 0
}
return o.size
}
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) {
o.hasMetaData = true
o.size = info.Size
if info.FileSystemInfo != nil {
o.modTime = time.Time(info.FileSystemInfo.LastModifiedDateTime)
} else {
o.modTime = time.Time(info.LastModifiedDateTime)
}
o.id = info.ID
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if o.hasMetaData {
return nil
}
// leaf, directoryID, err := o.fs.dirCache.FindPath(o.remote, false)
// if err != nil {
// return err
// }
info, _, err := o.fs.readMetaDataForPath(o.srvPath())
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
}
o.setMetaData(info)
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return time.Now()
}
return o.modTime
}
// setModTime sets the modification time of the remote fs object
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
opts := api.Opts{
Method: "PATCH",
Path: "/drive/root:/" + o.srvPath(),
}
update := api.SetFileSystemInfo{
FileSystemInfo: api.FileSystemInfoFacet{
CreatedDateTime: api.Timestamp(modTime),
LastModifiedDateTime: api.Timestamp(modTime),
},
}
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
return shouldRetry(resp, err)
})
return info, err
}
// SetModTime sets the modification time of the remote fs object
func (o *Object) SetModTime(modTime time.Time) {
info, err := o.setModTime(modTime)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to update remote mtime: %v", err)
}
o.setMetaData(info)
}
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open() (in io.ReadCloser, err error) {
if o.id == "" {
return nil, fmt.Errorf("Can't download no id")
}
var resp *http.Response
opts := api.Opts{
Method: "GET",
Path: "/drive/items/" + o.id + "/content",
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return resp.Body, err
}
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession() (response *api.CreateUploadResponse, err error) {
opts := api.Opts{
Method: "POST",
Path: "/drive/root:/" + o.srvPath() + ":/upload.createSession",
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
return shouldRetry(resp, err)
})
return
}
// uploadFragment uploads a part
func (o *Object) uploadFragment(url string, start int64, totalSize int64, buf []byte) (err error) {
bufSize := int64(len(buf))
opts := api.Opts{
Method: "PUT",
Path: url,
Absolute: true,
ContentLength: &bufSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", start, start+bufSize-1, totalSize),
Body: bytes.NewReader(buf),
}
var response api.UploadFragmentResponse
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
return shouldRetry(resp, err)
})
return err
}
// cancelUploadSession cancels an upload session
func (o *Object) cancelUploadSession(url string) (err error) {
opts := api.Opts{
Method: "DELETE",
Path: url,
Absolute: true,
NoResponse: true,
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
return
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
if chunkSize%(320*1024) != 0 {
return fmt.Errorf("Chunk size %d is not a multiple of 320k", chunkSize)
}
// Create upload session
fs.Debug(o, "Starting multipart upload")
session, err := o.createUploadSession()
if err != nil {
return err
}
uploadURL := session.UploadURL
// Cancel the session if something went wrong
defer func() {
if err != nil {
fs.Debug(o, "Cancelling multipart upload")
cancelErr := o.cancelUploadSession(uploadURL)
if cancelErr != nil {
fs.Log(o, "Failed to cancel multipart upload: %v", err)
}
}
}()
// Upload the chunks
remaining := size
position := int64(0)
buf := make([]byte, int64(chunkSize))
for remaining > 0 {
n := int64(chunkSize)
if remaining < n {
n = remaining
buf = buf[:n]
}
_, err = io.ReadFull(in, buf)
if err != nil {
return err
}
fs.Debug(o, "Uploading segment %d/%d size %d", position, size, n)
err = o.uploadFragment(uploadURL, position, size, buf)
if err != nil {
return err
}
remaining -= n
position += n
}
return nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) (err error) {
var info *api.Item
if size <= int64(uploadCutoff) {
// This is for less than 100 MB of content
var resp *http.Response
opts := api.Opts{
Method: "PUT",
Path: "/drive/root:/" + o.srvPath() + ":/content",
Body: in,
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
if err != nil {
return err
}
o.setMetaData(info)
} else {
err = o.uploadMultipart(in, size)
if err != nil {
return err
}
}
// Set the mod time now and read metadata
info, err = o.setModTime(modTime)
if err != nil {
return err
}
o.setMetaData(info)
return nil
}
// Remove an object
func (o *Object) Remove() error {
return o.fs.deleteObject(o.id)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
// _ fs.Copier = (*Fs)(nil)
// _ fs.Mover = (*Fs)(nil)
// _ fs.DirMover = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)
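The Content-Range header built by uploadFragment is easier to check with concrete numbers. This standalone sketch reproduces the loop from uploadMultipart and prints the ranges for an invented 25 MB upload with the default 10 MB chunk size:

package main

import "fmt"

func main() {
	const chunkSize = int64(10 * 1024 * 1024) // default --onedrive-chunk-size
	size := int64(25 * 1024 * 1024)           // hypothetical total upload size

	// Each fragment covers [position, position+n-1] of the total size
	for position := int64(0); position < size; {
		n := chunkSize
		if size-position < n {
			n = size - position
		}
		fmt.Printf("Content-Range: bytes %d-%d/%d\n", position, position+n-1, size)
		position += n
	}
	// Output:
	// Content-Range: bytes 0-10485759/26214400
	// Content-Range: bytes 10485760-20971519/26214400
	// Content-Range: bytes 20971520-26214399/26214400
}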

onedrive/onedrive_test.go (new file)
@@ -0,0 +1,56 @@
// Test OneDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package onedrive_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/ncw/rclone/onedrive"
)
func init() {
fstests.NilObject = fs.Object((*onedrive.Object)(nil))
fstests.RemoteName = "TestOneDrive:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

onedrive/replace.go (new file)
@@ -0,0 +1,91 @@
/*
Translate file names for one drive
OneDrive reserved characters
The following characters are OneDrive reserved characters, and can't
be used in OneDrive folder and file names.
onedrive-reserved = "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|"
onedrive-business-reserved
= "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|" / "#" / "%"
Note: Folder names can't end with a period (.).
Note: OneDrive for Business file or folder names cannot begin with a
tilde ('~').
*/
package onedrive
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// Onedrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'*': '＊', // FULLWIDTH ASTERISK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'?': '？', // FULLWIDTH QUESTION MARK
':': '：', // FULLWIDTH COLON
'|': '｜', // FULLWIDTH VERTICAL LINE
'#': '＃', // FULLWIDTH NUMBER SIGN
'%': '％', // FULLWIDTH PERCENT SIGN
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'.': '．', // FULLWIDTH FULL STOP
'~': '～', // FULLWIDTH TILDE
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixEndingInPeriod = regexp.MustCompile(`\.(/|$)`)
fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Folder names can't end with a period '.'
in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
// OneDrive for Business file or folder names cannot begin with a tilde '~'
in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
// Apparently file names can't start with space either
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Replace reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
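A sketch of the round trip on an invented file name (it would live in a _test.go file in package onedrive, since the helpers are unexported). replaceReservedChars is applied on the way to the API by srvPath and readMetaDataForPath, and restoreReservedChars undoes it on names coming back in listAll:

package onedrive

import "fmt"

func Example_replaceReservedChars() {
	name := `Q4 report: draft?.txt`
	mapped := replaceReservedChars(name)
	fmt.Println(mapped)
	fmt.Println(restoreReservedChars(mapped) == name)
	// Output:
	// Q4 report： draft？.txt
	// true
}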

onedrive/replace_test.go (new file)
@@ -0,0 +1,30 @@
package onedrive
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂.~`},
{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂.~/＼＊＜＞？：｜＃％＂.~`},
{" leading space", "␠leading space"},
{"~leading tilde", "～leading tilde"},
{"trailing dot.", "trailing dot．"},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"~leading tilde/~leading tilde/~leading tilde", "～leading tilde/～leading tilde/～leading tilde"},
{"trailing dot./trailing dot./trailing dot.", "trailing dot．/trailing dot．/trailing dot．"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

pacer/pacer.go (new file)
@@ -0,0 +1,293 @@
// Package pacer makes pacing and retrying API calls easy
package pacer
import (
"math/rand"
"sync"
"time"
"github.com/ncw/rclone/fs"
)
// Pacer state
type Pacer struct {
mu sync.Mutex // Protecting read/writes
minSleep time.Duration // minimum sleep time
maxSleep time.Duration // maximum sleep time
decayConstant uint // decay constant
pacer chan struct{} // To pace the operations
sleepTime time.Duration // Time to sleep for each transaction
retries int // Max number of retries
maxConnections int // Maximum number of concurrent connections
connTokens chan struct{} // Connection tokens
calculatePace func(bool) // switchable pacing algorithm - call with mu held
consecutiveRetries int // number of consecutive retries
}
// Type is for selecting different pacing algorithms
type Type int
const (
// DefaultPacer is a truncated exponential attack and decay.
//
// On retries the sleep time is doubled, on non errors then
// sleeptime decays according to the decay constant as set
// with SetDecayConstant.
//
// The sleep never goes below that set with SetMinSleep or
// above that set with SetMaxSleep.
DefaultPacer = Type(iota)
// AmazonCloudDrivePacer is a specialised pacer for Amazon Cloud Drive
//
// It implements a truncated exponential backoff strategy with
// randomization. Normally operations are paced at the
// interval set with SetMinSleep. On errors the sleep timer
// is set to 0..2**retries seconds.
//
// See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
AmazonCloudDrivePacer
)
// Paced is a function which is called by the Call and CallNoRetry
// methods. It should return a boolean, true if it would like to be
// retried, and an error. This error may be returned or returned
// wrapped in a RetryError.
type Paced func() (bool, error)
// New returns a Pacer with sensible defaults
func New() *Pacer {
p := &Pacer{
minSleep: 10 * time.Millisecond,
maxSleep: 2 * time.Second,
decayConstant: 2,
retries: 10,
pacer: make(chan struct{}, 1),
}
p.sleepTime = p.minSleep
p.SetPacer(DefaultPacer)
p.SetMaxConnections(fs.Config.Checkers + fs.Config.Transfers)
// Put the first pacing token in
p.pacer <- struct{}{}
return p
}
// SetMinSleep sets the minimum sleep time for the pacer
func (p *Pacer) SetMinSleep(t time.Duration) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.minSleep = t
p.sleepTime = p.minSleep
return p
}
// SetMaxSleep sets the maximum sleep time for the pacer
func (p *Pacer) SetMaxSleep(t time.Duration) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.maxSleep = t
p.sleepTime = p.minSleep
return p
}
// SetMaxConnections sets the maximum number of concurrent connections.
// Setting the value to 0 will allow unlimited number of connections.
// Should not be changed once you have started calling the pacer.
// By default this will be set to fs.Config.Checkers + fs.Config.Transfers.
func (p *Pacer) SetMaxConnections(n int) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.maxConnections = n
if n <= 0 {
p.connTokens = nil
} else {
p.connTokens = make(chan struct{}, n)
for i := 0; i < n; i++ {
p.connTokens <- struct{}{}
}
}
return p
}
// SetDecayConstant sets the decay constant for the pacer
//
// This is the speed the time falls back to the minimum after errors
// have occurred.
//
// bigger for slower decay, exponential
func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.decayConstant = decay
return p
}
// SetRetries sets the max number of tries for Call
func (p *Pacer) SetRetries(retries int) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.retries = retries
return p
}
// SetPacer sets the pacing algorithm
//
// It will choose the default algorithm if an incorrect value is
// passed in.
func (p *Pacer) SetPacer(t Type) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
switch t {
case AmazonCloudDrivePacer:
p.calculatePace = p.acdPacer
default:
p.calculatePace = p.defaultPacer
}
return p
}
// Start a call to the API
//
// This must be called as a pair with endCall
//
// This waits for the pacer token
func (p *Pacer) beginCall() {
// pacer starts with a token in and whenever we take one out
// XXX ms later we put another in. We could do this with a
// Ticker more accurately, but then we'd have to work out how
// not to run it when it wasn't needed
<-p.pacer
if p.maxConnections > 0 {
<-p.connTokens
}
p.mu.Lock()
// Restart the timer
go func(t time.Duration) {
// fs.Debug(f, "New sleep for %v at %v", t, time.Now())
time.Sleep(t)
p.pacer <- struct{}{}
}(p.sleepTime)
p.mu.Unlock()
}
// defaultPacer implements an exponential up and down pacing algorithm
//
// See the description for DefaultPacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) defaultPacer(retry bool) {
oldSleepTime := p.sleepTime
if retry {
p.sleepTime *= 2
if p.sleepTime > p.maxSleep {
p.sleepTime = p.maxSleep
}
if p.sleepTime != oldSleepTime {
fs.Debug("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
}
} else {
p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
if p.sleepTime < p.minSleep {
p.sleepTime = p.minSleep
}
if p.sleepTime != oldSleepTime {
fs.Debug("pacer", "Reducing sleep to %v", p.sleepTime)
}
}
}
// acdPacer implements a truncated exponential backoff
// strategy with randomization for Amazon Cloud Drive
//
// See the description for AmazonCloudDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) acdPacer(retry bool) {
consecutiveRetries := p.consecutiveRetries
if consecutiveRetries == 0 {
if p.sleepTime != p.minSleep {
p.sleepTime = p.minSleep
fs.Debug("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
}
} else {
if consecutiveRetries > 9 {
consecutiveRetries = 9
}
// consecutiveRetries starts at 1 so
// maxSleep is 2**(consecutiveRetries-1) seconds
maxSleep := time.Second << uint(consecutiveRetries-1)
// actual sleep is random from 0..maxSleep
p.sleepTime = time.Duration(rand.Int63n(int64(maxSleep)))
if p.sleepTime < p.minSleep {
p.sleepTime = p.minSleep
}
fs.Debug("pacer", "Rate limited, sleeping for %v (%d retries)", p.sleepTime, consecutiveRetries)
}
}
// endCall implements the pacing algorithm
//
// It takes a boolean as to whether the operation should be retried
// or not, updates the retry count and recalculates the sleep time
func (p *Pacer) endCall(retry bool) {
if p.maxConnections > 0 {
p.connTokens <- struct{}{}
}
p.mu.Lock()
if retry {
p.consecutiveRetries++
} else {
p.consecutiveRetries = 0
}
p.calculatePace(retry)
p.mu.Unlock()
}
// call implements Call but with settable retries
func (p *Pacer) call(fn Paced, retries int) (err error) {
var retry bool
for i := 0; i < retries; i++ {
p.beginCall()
retry, err = fn()
p.endCall(retry)
if !retry {
break
}
}
if retry {
err = fs.RetryError(err)
}
return err
}
// Call paces the remote operations to not exceed the limits and retry
// on rate limit exceeded
//
// This calls fn, expecting it to return a retry flag and an
// error. This error may be returned wrapped in a RetryError if the
// number of retries is exceeded.
func (p *Pacer) Call(fn Paced) (err error) {
p.mu.Lock()
retries := p.retries
p.mu.Unlock()
return p.call(fn, retries)
}
// CallNoRetry paces the remote operations to not exceed the limits
// and return a retry error on rate limit exceeded
//
// This calls fn and wraps the output in a RetryError if it would like
// it to be retried
func (p *Pacer) CallNoRetry(fn Paced) error {
return p.call(fn, 1)
}
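For reference, a hedged sketch of the intended calling pattern, mirroring how shouldRetry and pacer.Call are combined in onedrive.go; doRequest and its retry test are invented stand-ins. With decayConstant 2, each successful call shrinks the sleep to (s<<2 - s)>>2 = 3s/4, so 1s decays to 750ms, as the table in pacer_test.go below confirms.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/ncw/rclone/pacer"
)

// doRequest is a hypothetical stand-in for a remote API call
func doRequest() (*http.Response, error) {
	return nil, fmt.Errorf("not implemented")
}

func main() {
	// The same tuning the onedrive backend uses
	p := pacer.New().
		SetMinSleep(10 * time.Millisecond).
		SetMaxSleep(2 * time.Second).
		SetDecayConstant(2)

	err := p.Call(func() (bool, error) {
		resp, err := doRequest()
		// Returning true asks the pacer to sleep and retry
		retry := err != nil || (resp != nil && resp.StatusCode == 429)
		return retry, err
	})
	// Once the retries are exhausted err comes back wrapped in a RetryError
	fmt.Println(err)
}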

pacer/pacer_test.go (new file)
@@ -0,0 +1,324 @@
package pacer
import (
"fmt"
"testing"
"time"
"github.com/ncw/rclone/fs"
)
func TestNew(t *testing.T) {
p := New()
if p.minSleep != 10*time.Millisecond {
t.Errorf("minSleep")
}
if p.maxSleep != 2*time.Second {
t.Errorf("maxSleep")
}
if p.sleepTime != p.minSleep {
t.Errorf("sleepTime")
}
if p.retries != 10 {
t.Errorf("retries")
}
if p.decayConstant != 2 {
t.Errorf("decayConstant")
}
if cap(p.pacer) != 1 {
t.Errorf("pacer 1")
}
if len(p.pacer) != 1 {
t.Errorf("pacer 2")
}
if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.defaultPacer) {
t.Errorf("calculatePace")
}
if p.maxConnections != fs.Config.Checkers+fs.Config.Transfers {
t.Errorf("maxConnections")
}
if cap(p.connTokens) != fs.Config.Checkers+fs.Config.Transfers {
t.Errorf("connTokens")
}
if p.consecutiveRetries != 0 {
t.Errorf("consecutiveRetries")
}
}
func TestSetMinSleep(t *testing.T) {
p := New().SetMinSleep(1 * time.Millisecond)
if p.minSleep != 1*time.Millisecond {
t.Errorf("didn't set")
}
}
func TestSetMaxSleep(t *testing.T) {
p := New().SetMaxSleep(100 * time.Second)
if p.maxSleep != 100*time.Second {
t.Errorf("didn't set")
}
}
func TestMaxConnections(t *testing.T) {
p := New().SetMaxConnections(20)
if p.maxConnections != 20 {
t.Errorf("maxConnections")
}
if cap(p.connTokens) != 20 {
t.Errorf("connTokens")
}
p.SetMaxConnections(0)
if p.maxConnections != 0 {
t.Errorf("maxConnections is not 0")
}
if p.connTokens != nil {
t.Errorf("connTokens is not nil")
}
}
func TestSetDecayConstant(t *testing.T) {
p := New().SetDecayConstant(17)
if p.decayConstant != 17 {
t.Errorf("didn't set")
}
}
func TestSetRetries(t *testing.T) {
p := New().SetRetries(18)
if p.retries != 18 {
t.Errorf("didn't set")
}
}
func TestSetPacer(t *testing.T) {
p := New().SetPacer(AmazonCloudDrivePacer)
if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.acdPacer) {
t.Errorf("calculatePace is not acdPacer")
}
p.SetPacer(DefaultPacer)
if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.defaultPacer) {
t.Errorf("calculatePace is not defaultPacer")
}
}
// emptyTokens empties the pacer of all its tokens
func emptyTokens(p *Pacer) {
for len(p.pacer) != 0 {
<-p.pacer
}
for len(p.connTokens) != 0 {
<-p.connTokens
}
}
// waitForPace waits for duration for the pace to arrive
// returns the time that it arrived or a zero time
func waitForPace(p *Pacer, duration time.Duration) (when time.Time) {
select {
case <-time.After(duration):
return
case <-p.pacer:
return time.Now()
}
}
func TestBeginCall(t *testing.T) {
p := New().SetMaxConnections(10).SetMinSleep(1 * time.Millisecond)
emptyTokens(p)
go p.beginCall()
if !waitForPace(p, 10*time.Millisecond).IsZero() {
t.Errorf("beginSleep fired too early #1")
}
startTime := time.Now()
p.pacer <- struct{}{}
time.Sleep(1 * time.Millisecond)
connTime := time.Now()
p.connTokens <- struct{}{}
time.Sleep(1 * time.Millisecond)
paceTime := waitForPace(p, 10*time.Millisecond)
if paceTime.IsZero() {
t.Errorf("beginSleep didn't fire")
} else if paceTime.Sub(startTime) < 0 {
t.Errorf("pace arrived before returning pace token")
} else if paceTime.Sub(connTime) < 0 {
t.Errorf("pace arrived before sending conn token")
}
}
func TestBeginCallZeroConnections(t *testing.T) {
p := New().SetMaxConnections(0).SetMinSleep(1 * time.Millisecond)
emptyTokens(p)
go p.beginCall()
if !waitForPace(p, 10*time.Millisecond).IsZero() {
t.Errorf("beginSleep fired too early #1")
}
startTime := time.Now()
p.pacer <- struct{}{}
time.Sleep(1 * time.Millisecond)
paceTime := waitForPace(p, 10*time.Millisecond)
if paceTime.IsZero() {
t.Errorf("beginSleep didn't fire")
} else if paceTime.Sub(startTime) < 0 {
t.Errorf("pace arrived before returning pace token")
}
}
func TestDefaultPacer(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second).SetDecayConstant(2)
for _, test := range []struct {
in time.Duration
retry bool
want time.Duration
}{
{time.Millisecond, true, 2 * time.Millisecond},
{time.Second, true, time.Second},
{(3 * time.Second) / 4, true, time.Second},
{time.Second, false, 750 * time.Millisecond},
{1000 * time.Microsecond, false, time.Millisecond},
{1200 * time.Microsecond, false, time.Millisecond},
} {
p.sleepTime = test.in
p.defaultPacer(test.retry)
got := p.sleepTime
if got != test.want {
t.Errorf("bad sleep want %v got %v", test.want, got)
}
}
}
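// Editor's note: a minimal sketch of the pacing rule this table pins
// down (names assumed, not necessarily the pacer.go body): a retry
// doubles the delay up to maxSleep; a success decays it by
// sleepTime/2^decayConstant down to minSleep, e.g. 1s -> 750ms for
// decayConstant 2.
func (p *Pacer) defaultPacerSketch(retry bool) {
	if retry {
		p.sleepTime *= 2
		if p.sleepTime > p.maxSleep {
			p.sleepTime = p.maxSleep
		}
	} else {
		p.sleepTime -= p.sleepTime >> uint(p.decayConstant)
		if p.sleepTime < p.minSleep {
			p.sleepTime = p.minSleep
		}
	}
}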
func TestAmazonCloudDrivePacer(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetPacer(AmazonCloudDrivePacer).SetMaxSleep(time.Second).SetDecayConstant(2)
// Do lots of times because of the random number!
for _, test := range []struct {
in time.Duration
consecutiveRetries int
retry bool
want time.Duration
}{
{time.Millisecond, 0, true, time.Millisecond},
{10 * time.Millisecond, 0, true, time.Millisecond},
{1 * time.Second, 1, true, 500 * time.Millisecond},
{1 * time.Second, 2, true, 1 * time.Second},
{1 * time.Second, 3, true, 2 * time.Second},
{1 * time.Second, 4, true, 4 * time.Second},
{1 * time.Second, 5, true, 8 * time.Second},
{1 * time.Second, 6, true, 16 * time.Second},
{1 * time.Second, 7, true, 32 * time.Second},
{1 * time.Second, 8, true, 64 * time.Second},
{1 * time.Second, 9, true, 128 * time.Second},
{1 * time.Second, 10, true, 128 * time.Second},
{1 * time.Second, 11, true, 128 * time.Second},
} {
const n = 1000
var sum time.Duration
// measure average time over n cycles
for i := 0; i < n; i++ {
p.sleepTime = test.in
p.consecutiveRetries = test.consecutiveRetries
p.acdPacer(test.retry)
sum += p.sleepTime
}
got := sum / n
//t.Logf("%+v: got = %v", test, got)
if got < (test.want*9)/10 || got > (test.want*11)/10 {
t.Fatalf("%+v: bad sleep want %v+/-10%% got %v", test, test.want, got)
}
}
}
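// Editor's note: a minimal sketch matching the averages the test
// checks (names assumed; needs math/rand). Retry n sleeps a uniform
// random duration in [0, 2^(n-1) seconds), whose mean is 2^(n-2)
// seconds, with n capped at 9 - which is why rows 9, 10 and 11 all
// average 128s. Without a consecutive retry the delay drops back to
// minSleep.
func (p *Pacer) acdPacerSketch(retry bool) {
	n := p.consecutiveRetries
	if !retry || n == 0 {
		p.sleepTime = p.minSleep
		return
	}
	if n > 9 {
		n = 9
	}
	ceiling := time.Second << uint(n-1) // 1s, 2s, 4s, ... 256s
	p.sleepTime = time.Duration(rand.Int63n(int64(ceiling)))
	if p.sleepTime < p.minSleep {
		p.sleepTime = p.minSleep
	}
}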
func TestEndCall(t *testing.T) {
p := New().SetMaxConnections(5)
emptyTokens(p)
p.consecutiveRetries = 1
p.endCall(true)
if len(p.connTokens) != 1 {
t.Errorf("Expecting 1 token")
}
if p.consecutiveRetries != 2 {
t.Errorf("Bad consecutive retries")
}
}
func TestEndCallZeroConnections(t *testing.T) {
p := New().SetMaxConnections(0)
emptyTokens(p)
p.consecutiveRetries = 1
p.endCall(false)
if len(p.connTokens) != 0 {
t.Errorf("Expecting 0 token")
}
if p.consecutiveRetries != 0 {
t.Errorf("Bad consecutive retries")
}
}
var errFoo = fmt.Errorf("Foo")
type dummyPaced struct {
retry bool
called int
}
func (dp *dummyPaced) fn() (bool, error) {
dp.called++
return dp.retry, errFoo
}
func Test_callNoRetry(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond)
dp := &dummyPaced{retry: false}
err := p.call(dp.fn, 10)
if dp.called != 1 {
t.Errorf("called want %d got %d", 1, dp.called)
}
if err != errFoo {
t.Errorf("err want %v got %v", errFoo, err)
}
}
func Test_callRetry(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond)
dp := &dummyPaced{retry: true}
err := p.call(dp.fn, 10)
if dp.called != 10 {
t.Errorf("called want %d got %d", 10, dp.called)
}
if err == errFoo {
t.Errorf("err didn't want %v got %v", errFoo, err)
}
_, ok := err.(fs.Retry)
if !ok {
t.Errorf("didn't return a retry error")
}
}
func TestCall(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond).SetRetries(20)
dp := &dummyPaced{retry: true}
err := p.Call(dp.fn)
if dp.called != 20 {
t.Errorf("called want %d got %d", 20, dp.called)
}
_, ok := err.(fs.Retry)
if !ok {
t.Errorf("didn't return a retry error")
}
}
func TestCallNoRetry(t *testing.T) {
p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond).SetRetries(20)
dp := &dummyPaced{retry: true}
err := p.CallNoRetry(dp.fn)
if dp.called != 1 {
t.Errorf("called want %d got %d", 1, dp.called)
}
_, ok := err.(fs.Retry)
if !ok {
t.Errorf("didn't return a retry error")
}
}
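// Editor's note: a hypothetical usage sketch (not part of this
// diff) showing how a remote is expected to drive the pacer, based
// on the Call/CallNoRetry tests above; doAPICall is a placeholder.
// The paced function reports both the error and whether it is worth
// retrying - Call keeps going until it succeeds, the function asks
// to stop, or p.retries attempts are used up.
func pacedCallSketch(p *Pacer) error {
	return p.Call(func() (bool, error) {
		err := doAPICall()     // hypothetical network call
		return err != nil, err // naive: retry on any error
	})
}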

rclone.go

@@ -12,12 +12,16 @@ import (
"strings"
"time"
"github.com/ogier/pflag"
"github.com/spf13/pflag"
"github.com/ncw/rclone/fs"
// Active file systems
_ "github.com/ncw/rclone/amazonclouddrive"
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/onedrive"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
)
@@ -26,18 +30,22 @@ import (
var (
// Flags
cpuprofile = pflag.StringP("cpuprofile", "", "", "Write cpu profile to file")
statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats")
statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats (0 to disable)")
version = pflag.BoolP("version", "V", false, "Print the version number")
logFile = pflag.StringP("log-file", "", "", "Log everything to this file")
retries = pflag.IntP("retries", "", 3, "Retry operations this many times if they fail")
)
// Command holds info about the current running command
type Command struct {
Name string
Help string
ArgsHelp string
Run func(fdst, fsrc fs.Fs)
Run func(fdst, fsrc fs.Fs) error
MinArgs int
MaxArgs int
NoStats bool
Retry bool
}
// checkArgs checks there are enough arguments and prints a message if not
@@ -53,124 +61,163 @@ func (cmd *Command) checkArgs(args []string) {
}
}
// Commands is a slice of possible Command~s
var Commands = []Command{
{
Name: "copy",
ArgsHelp: "source://path dest://path",
ArgsHelp: "source:path dest:path",
Help: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Sync(fdst, fsrc, false)
if err != nil {
log.Fatalf("Failed to copy: %v", err)
}
Run: func(fdst, fsrc fs.Fs) error {
return fs.CopyDir(fdst, fsrc)
},
MinArgs: 2,
MaxArgs: 2,
Retry: true,
},
{
Name: "sync",
ArgsHelp: "source://path dest://path",
ArgsHelp: "source:path dest:path",
Help: `
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the --dry-run flag.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Sync(fdst, fsrc, true)
if err != nil {
log.Fatalf("Failed to sync: %v", err)
}
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the --dry-run flag.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.Sync(fdst, fsrc)
},
MinArgs: 2,
MaxArgs: 2,
Retry: true,
},
{
Name: "move",
ArgsHelp: "source:path dest:path",
Help: `
Moves the source to the destination. This is equivalent to a
copy followed by a purge, but may use server side operations
to speed it up. Since this can cause data loss, test first
with the --dry-run flag.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.MoveDir(fdst, fsrc)
},
MinArgs: 2,
MaxArgs: 2,
Retry: true,
},
{
Name: "ls",
ArgsHelp: "[remote://path]",
ArgsHelp: "remote:path",
Help: `
List all the objects in the path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.List(fdst)
if err != nil {
log.Fatalf("Failed to list: %v", err)
}
List all the objects in the path with size and path.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.List(fdst, os.Stdout)
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "lsd",
ArgsHelp: "[remote://path]",
ArgsHelp: "remote:path",
Help: `
List all directories/containers/buckets in the path.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.ListDir(fdst)
Run: func(fdst, fsrc fs.Fs) error {
return fs.ListDir(fdst, os.Stdout)
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "lsl",
ArgsHelp: "remote:path",
Help: `
List all the objects in the path with modification time,
size and path.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.ListLong(fdst, os.Stdout)
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "md5sum",
ArgsHelp: "remote:path",
Help: `
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.Md5sum(fdst, os.Stdout)
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "size",
ArgsHelp: "remote:path",
Help: `
Returns the total size of objects in remote:path and the number
of objects.`,
Run: func(fdst, fsrc fs.Fs) error {
objects, size, err := fs.Count(fdst)
if err != nil {
log.Fatalf("Failed to listdir: %v", err)
return err
}
fmt.Printf("Total objects: %d\n", objects)
fmt.Printf("Total size: %v (%d bytes)\n", fs.SizeSuffix(size), size)
return nil
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "mkdir",
ArgsHelp: "remote://path",
ArgsHelp: "remote:path",
Help: `
Make the path if it doesn't already exist`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Mkdir(fdst)
if err != nil {
log.Fatalf("Failed to mkdir: %v", err)
}
Run: func(fdst, fsrc fs.Fs) error {
return fs.Mkdir(fdst)
},
MinArgs: 1,
MaxArgs: 1,
Retry: true,
},
{
Name: "rmdir",
ArgsHelp: "remote://path",
ArgsHelp: "remote:path",
Help: `
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Rmdir(fdst)
if err != nil {
log.Fatalf("Failed to rmdir: %v", err)
}
Run: func(fdst, fsrc fs.Fs) error {
return fs.Rmdir(fdst)
},
MinArgs: 1,
MaxArgs: 1,
Retry: true,
},
{
Name: "purge",
ArgsHelp: "remote://path",
ArgsHelp: "remote:path",
Help: `
Remove the path and all of its contents.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Purge(fdst)
if err != nil {
log.Fatalf("Failed to purge: %v", err)
}
Run: func(fdst, fsrc fs.Fs) error {
return fs.Purge(fdst)
},
MinArgs: 1,
MaxArgs: 1,
Retry: true,
},
{
Name: "check",
ArgsHelp: "source://path dest://path",
ArgsHelp: "source:path dest:path",
Help: `
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.`,
Run: func(fdst, fsrc fs.Fs) {
err := fs.Check(fdst, fsrc)
if err != nil {
log.Fatalf("Failed to check: %v", err)
}
Run: func(fdst, fsrc fs.Fs) error {
return fs.Check(fdst, fsrc)
},
MinArgs: 2,
MaxArgs: 2,
@@ -179,8 +226,9 @@ var Commands = []Command{
Name: "config",
Help: `
Enter an interactive configuration session.`,
Run: func(fdst, fsrc fs.Fs) {
Run: func(fdst, fsrc fs.Fs) error {
fs.EditConfig()
return nil
},
NoStats: true,
},
@@ -200,7 +248,7 @@ Syntax: [options] subcommand <parameters> <parameters...>
Subcommands:
`, Version)
`, fs.Version)
for i := range Commands {
cmd := &Commands[i]
fmt.Fprintf(os.Stderr, " %s %s\n", cmd.Name, cmd.ArgsHelp)
@@ -210,7 +258,8 @@ Subcommands:
fmt.Fprintf(os.Stderr, "Options:\n")
pflag.PrintDefaults()
fmt.Fprintf(os.Stderr, `
It is only necessary to use a unique prefix of the subcommand, eg 'up' for 'upload'.
It is only necessary to use a unique prefix of the subcommand, eg 'mo'
for 'move'.
`)
}
@@ -221,7 +270,7 @@ func fatal(message string, args ...interface{}) {
os.Exit(1)
}
// Parse the command line flags
// ParseFlags parses the command line flags
func ParseFlags() {
pflag.Usage = syntaxError
pflag.Parse()
@@ -235,12 +284,16 @@ func ParseFlags() {
fs.Stats.Error()
log.Fatal(err)
}
pprof.StartCPUProfile(f)
err = pprof.StartCPUProfile(f)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
defer pprof.StopCPUProfile()
}
}
// Parse the command from the command line
// ParseCommand parses the command from the command line
func ParseCommand() (*Command, []string) {
args := pflag.Args()
if len(args) < 1 {
@@ -251,6 +304,7 @@ func ParseCommand() (*Command, []string) {
args = args[1:]
// Find the command doing a prefix match
var found = make([]*Command, 0, 1)
var command *Command
for i := range Commands {
trialCommand := &Commands[i]
@@ -259,16 +313,24 @@ func ParseCommand() (*Command, []string) {
command = trialCommand
break
} else if strings.HasPrefix(trialCommand.Name, cmd) {
if command != nil {
fs.Stats.Error()
log.Fatalf("Not unique - matches multiple commands %q", cmd)
}
command = trialCommand
found = append(found, trialCommand)
}
}
if command == nil {
fs.Stats.Error()
log.Fatalf("Unknown command %q", cmd)
switch len(found) {
case 0:
fs.Stats.Error()
log.Fatalf("Unknown command %q", cmd)
case 1:
command = found[0]
default:
fs.Stats.Error()
var names []string
for _, cmd := range found {
names = append(names, `"`+cmd.Name+`"`)
}
log.Fatalf("Not unique - matches multiple commands: %s", strings.Join(names, ", "))
}
}
if command.Run == nil {
syntaxError()
@@ -277,7 +339,7 @@ func ParseCommand() (*Command, []string) {
return command, args
}
// Create a Fs from a name
// NewFs creates a Fs from a name
func NewFs(remote string) fs.Fs {
f, err := fs.NewFs(remote)
if err != nil {
@@ -287,8 +349,11 @@ func NewFs(remote string) fs.Fs {
return f
}
// Print the stats every statsInterval
// StartStats prints the stats every statsInterval
func StartStats() {
if *statsInterval <= 0 {
return
}
go func() {
ch := time.Tick(*statsInterval)
for {
@@ -301,11 +366,25 @@ func StartStats() {
func main() {
ParseFlags()
if *version {
fmt.Printf("rclone %s\n", Version)
fmt.Printf("rclone %s\n", fs.Version)
os.Exit(0)
}
command, args := ParseCommand()
// Log file output
if *logFile != "" {
f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
if err != nil {
log.Fatalf("Failed to open log file: %v", err)
}
_, err = f.Seek(0, os.SEEK_END)
if err != nil {
log.Printf("Failed to seek log file to end: %v", err)
}
log.SetOutput(f)
redirectStderr(f)
}
// Make source and destination fs
var fdst, fsrc fs.Fs
if len(args) >= 1 {
@@ -324,12 +403,29 @@ func main() {
// Run the actual command
if command.Run != nil {
command.Run(fdst, fsrc)
if !command.NoStats {
fmt.Println(fs.Stats)
var err error
for try := 1; try <= *retries; try++ {
err = command.Run(fdst, fsrc)
if !command.Retry || (err == nil && !fs.Stats.Errored()) {
break
}
if err != nil {
fs.Log(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
} else {
fs.Log(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
}
if try < *retries {
fs.Stats.ResetErrors()
}
}
if err != nil {
log.Fatalf("Failed to %s: %v", command.Name, err)
}
if !command.NoStats && (!fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0) {
fmt.Fprintln(os.Stderr, fs.Stats)
}
if fs.Config.Verbose {
log.Printf("*** Go routines at exit %d\n", runtime.NumGoroutine())
fs.Debug(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
}
if fs.Stats.Errored() {
os.Exit(1)


@@ -1,334 +0,0 @@
// Test rclone by doing real transactions to a storage provider to and
// from the local disk
package main
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ogier/pflag"
// Active file systems
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
)
// Globals
var (
localName, remoteName string
version = pflag.BoolP("version", "V", false, "Print the version number")
)
// Represents an item for checking
type Item struct {
Path string
Md5sum string
ModTime time.Time
Size int64
}
// Represents all items for checking
type Items struct {
byName map[string]*Item
items []Item
}
// Make an Items
func NewItems(items []Item) *Items {
is := &Items{
byName: make(map[string]*Item),
items: items,
}
// Fill up byName
for i := range items {
is.byName[items[i].Path] = &items[i]
}
return is
}
// Check off an item
func (is *Items) Find(obj fs.Object) {
i, ok := is.byName[obj.Remote()]
if !ok {
log.Fatalf("Unexpected file %q", obj.Remote())
}
delete(is.byName, obj.Remote())
// Check attributes
Md5sum, err := obj.Md5sum()
if err != nil {
log.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
}
if i.Md5sum != Md5sum {
log.Fatalf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
}
if i.Size != obj.Size() {
log.Fatalf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
}
// check the mod time to the given precision
modTime := obj.ModTime()
dt := modTime.Sub(i.ModTime)
if dt >= fs.Config.ModifyWindow || dt <= -fs.Config.ModifyWindow {
log.Fatalf("%s: Modification time difference too big |%s| > %s (%s vs %s)", obj.Remote(), dt, fs.Config.ModifyWindow, modTime, i.ModTime)
}
}
// Check all done
func (is *Items) Done() {
if len(is.byName) != 0 {
for name := range is.byName {
log.Printf("Not found %q", name)
}
log.Fatalf("%d objects not found", len(is.byName))
}
}
// Checks the fs to see if it has the expected contents
func CheckListing(f fs.Fs, items []Item) {
is := NewItems(items)
for obj := range f.List() {
is.Find(obj)
}
is.Done()
}
// Parse a time string or explode
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// Write a file
func WriteFile(filePath, content string, t time.Time) {
// FIXME make directories?
filePath = path.Join(localName, filePath)
err := ioutil.WriteFile(filePath, []byte(content), 0600)
if err != nil {
log.Fatalf("Failed to write file %q: %v", filePath, err)
}
err = os.Chtimes(filePath, t, t)
if err != nil {
log.Fatalf("Failed to chtimes file %q: %v", filePath, err)
}
}
// Create a random string
func RandomString(n int) string {
source := "abcdefghijklmnopqrstuvwxyz0123456789"
out := make([]byte, n)
for i := range out {
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
func TestMkdir(flocal, fremote fs.Fs) {
err := fs.Mkdir(fremote)
if err != nil {
log.Fatalf("Mkdir failed: %v", err)
}
items := []Item{}
CheckListing(flocal, items)
CheckListing(fremote, items)
}
var t1 = Time("2001-02-03T04:05:06.999999999Z")
var t2 = Time("2011-12-25T12:59:59.123456789Z")
func TestCopy(flocal, fremote fs.Fs) {
WriteFile("empty space", "", t1)
err := fs.Sync(fremote, flocal, false)
if err != nil {
log.Fatalf("Copy failed: %v", err)
}
items := []Item{
{Path: "empty space", Size: 0, ModTime: t1, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
}
func TestSync(flocal, fremote fs.Fs) {
log.Printf("Sync after changing file modtime only")
err := os.Chtimes(localName+"/empty space", t2, t2)
if err != nil {
log.Fatalf("Chtimes failed: %v", err)
}
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items := []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after adding a file")
WriteFile("potato", "------------------------------------------------------------", t1)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after changing a file's size only")
WriteFile("potato", "smaller but same date", t1)
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
{Path: "potato", Size: 21, ModTime: t1, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
// ------------------------------------------------------------
log.Printf("Sync after removing a file")
err = os.Remove(localName + "/potato")
if err != nil {
log.Fatalf("Remove failed: %v", err)
}
err = fs.Sync(fremote, flocal, true)
if err != nil {
log.Fatalf("Sync failed: %v", err)
}
items = []Item{
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
CheckListing(flocal, items)
CheckListing(fremote, items)
}
func TestLs(flocal, fremote fs.Fs) {
// Underlying List has been tested above, so we just make sure it runs
err := fs.List(fremote)
if err != nil {
log.Fatalf("List failed: %v", err)
}
}
func TestLsd(flocal, fremote fs.Fs) {
}
func TestCheck(flocal, fremote fs.Fs) {
}
func TestPurge(flocal, fremote fs.Fs) {
err := fs.Purge(fremote)
if err != nil {
log.Fatalf("Purge failed: %v", err)
}
}
func TestRmdir(flocal, fremote fs.Fs) {
err := fs.Rmdir(fremote)
if err != nil {
log.Fatalf("Rmdir failed: %v", err)
}
}
func syntaxError() {
fmt.Fprintf(os.Stderr, `Test rclone with a remote to find bugs in either - %s.
Syntax: [options] remote:
Need a remote: as argument. This will create a random container or
directory under it and perform tests on it, deleting it at the end.
Options:
`, Version)
pflag.PrintDefaults()
}
// Clean the temporary directory
func cleanTempDir() {
log.Printf("Cleaning temporary directory: %q", localName)
err := os.RemoveAll(localName)
if err != nil {
log.Printf("Failed to remove %q: %v", localName, err)
}
}
func main() {
pflag.Usage = syntaxError
pflag.Parse()
if *version {
fmt.Printf("rclonetest %s\n", Version)
os.Exit(0)
}
fs.LoadConfig()
rand.Seed(time.Now().UnixNano())
args := pflag.Args()
if len(args) != 1 {
syntaxError()
os.Exit(1)
}
remoteName = args[0]
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
remoteName += RandomString(32)
log.Printf("Testing with remote %q", remoteName)
var err error
localName, err = ioutil.TempDir("", "rclone")
if err != nil {
log.Fatalf("Failed to create temp dir: %v", err)
}
log.Printf("Testing with local %q", localName)
fremote, err := fs.NewFs(remoteName)
if err != nil {
log.Fatalf("Failed to make %q: %v", remoteName, err)
}
flocal, err := fs.NewFs(localName)
if err != nil {
log.Fatalf("Failed to make %q: %v", remoteName, err)
}
fs.CalculateModifyWindow(fremote, flocal)
TestMkdir(flocal, fremote)
TestCopy(flocal, fremote)
TestSync(flocal, fremote)
TestLs(flocal, fremote)
TestLsd(flocal, fremote)
TestCheck(flocal, fremote)
TestPurge(flocal, fremote)
//TestRmdir(flocal, fremote)
cleanTempDir()
log.Printf("Tests OK")
}


@@ -1,3 +0,0 @@
package main
const Version = "v0.98"

redirect_stderr.go

@@ -0,0 +1,15 @@
// Log the panic to the log file - for OSes which can't do this
// +build !windows,!unix
package main
import (
"log"
"os"
)
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
log.Printf("Can't redirect stderr to file")
}

19
redirect_stderr_unix.go Normal file
View File

@@ -0,0 +1,19 @@
// Log the panic under unix to the log file
// +build unix
package main
import (
"log"
"os"
"syscall"
)
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
err := syscall.Dup2(int(f.Fd()), int(os.Stderr.Fd()))
if err != nil {
log.Fatalf("Failed to redirect stderr to file: %v", err)
}
}


@@ -0,0 +1,39 @@
// Log the panic under windows to the log file
//
// Code from minix, via
//
// http://play.golang.org/p/kLtct7lSUg
// +build windows
package main
import (
"log"
"os"
"syscall"
)
var (
kernel32 = syscall.MustLoadDLL("kernel32.dll")
procSetStdHandle = kernel32.MustFindProc("SetStdHandle")
)
func setStdHandle(stdhandle int32, handle syscall.Handle) error {
r0, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
if r0 == 0 {
if e1 != 0 {
return error(e1)
}
return syscall.EINVAL
}
return nil
}
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
err := setStdHandle(syscall.STD_ERROR_HANDLE, syscall.Handle(f.Fd()))
if err != nil {
log.Fatalf("Failed to redirect stderr to file: %v", err)
}
}

s3/s3.go

@@ -1,72 +1,94 @@
// S3 interface
// Package s3 provides an interface to Amazon S3 object storage
package s3
// FIXME need to prevent anything but ListDir working for s3://
/*
Progress of port to aws-sdk
* Don't really need o.meta at all?
What happens if you CTRL-C a multipart upload
* get an incomplete upload
* disappears when you delete the bucket
*/
import (
"errors"
"fmt"
"io"
"mime"
"net/http"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/ncw/goamz/aws"
"github.com/ncw/goamz/s3"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
)
// Register with Fs
func init() {
fs.Register(&fs.FsInfo{
fs.Register(&fs.Info{
Name: "s3",
NewFs: NewFs,
// AWS endpoints: http://docs.amazonwebservices.com/general/latest/gr/rande.html#s3_region
Options: []fs.Option{{
Name: "access_key_id",
Help: "AWS Access Key ID.",
Help: "AWS Access Key ID - leave blank for anonymous access.",
}, {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password). ",
Help: "AWS Secret Access Key (password) - leave blank for anonymous access.",
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.",
Name: "region",
Help: "Region to connect to.",
Examples: []fs.OptionExample{{
Value: "https://s3.amazonaws.com/",
Value: "us-east-1",
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
}, {
Value: "https://s3-external-1.amazonaws.com",
Help: "US Region, Northern Virginia only.\nLeave location constraint empty.",
}, {
Value: "https://s3-us-west-2.amazonaws.com",
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "https://s3-us-west-1.amazonaws.com",
Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "https://s3-eu-west-1.amazonaws.com",
Value: "eu-west-1",
Help: "EU (Ireland) Region Region\nNeeds location constraint EU or eu-west-1.",
}, {
Value: "https://s3-ap-southeast-1.amazonaws.com",
Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
}, {
Value: "ap-southeast-1",
Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
}, {
Value: "https://s3-ap-southeast-2.amazonaws.com",
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint .",
Value: "ap-southeast-2",
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
}, {
Value: "https://s3-ap-northeast-1.amazonaws.com",
Value: "ap-northeast-1",
Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
}, {
Value: "https://s3-sa-east-1.amazonaws.com",
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}, {
Value: "other-v2-signature",
Help: "If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.",
}, {
Value: "other-v4-signature",
Help: "If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.\nSpecify if using an S3 clone such as Ceph.",
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Endpoint.",
Help: "Location constraint - must be set to match the Region. Used when creating buckets only.",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for US Region, Northern Virginia or Pacific Northwest.",
@@ -101,36 +123,53 @@ func init() {
// Constants
const (
metaMtime = "X-Amz-Meta-Mtime" // the meta key to store mtime in
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
listChunkSize = 1024 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
)
// FsS3 represents a remote s3 server
type FsS3 struct {
c *s3.S3 // the connection to the s3 server
b *s3.Bucket // the connection to the bucket
bucket string // the bucket we are working on
perm s3.ACL // permissions for new buckets / objects
root string // root of the bucket - ignore all objects above this
// Fs represents a remote s3 server
type Fs struct {
name string // the name of the remote
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
bucket string // the bucket we are working on
perm string // permissions for new buckets / objects
root string // root of the bucket - ignore all objects above this
locationConstraint string // location constraint of new buckets
}
// FsObjectS3 describes a s3 object
type FsObjectS3 struct {
// Object describes a s3 object
type Object struct {
// Will definitely have everything but meta which may be nil
//
// List will read everything but meta - to fill that in need to call
// readMetaData
s3 *FsS3 // what this object is part of
remote string // The remote path
etag string // md5sum of the object
bytes int64 // size of the object
lastModified time.Time // Last modified
meta s3.Headers // The object metadata if known - may be nil
fs *Fs // what this object is part of
remote string // The remote path
etag string // md5sum of the object
bytes int64 // size of the object
lastModified time.Time // Last modified
meta map[string]*string // The object metadata if known - may be nil
}
// ------------------------------------------------------------
// String converts this FsS3 to a string
func (f *FsS3) String() string {
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("S3 bucket %s", f.bucket)
}
@@ -153,61 +192,88 @@ func s3ParsePath(path string) (bucket, directory string, err error) {
}
// s3Connection makes a connection to s3
func s3Connection(name string) (*s3.S3, error) {
func s3Connection(name string) (*s3.S3, *session.Session, error) {
// Make the auth
accessKeyId := fs.ConfigFile.MustValue(name, "access_key_id")
if accessKeyId == "" {
return nil, errors.New("access_key_id not found")
}
accessKeyID := fs.ConfigFile.MustValue(name, "access_key_id")
secretAccessKey := fs.ConfigFile.MustValue(name, "secret_access_key")
if secretAccessKey == "" {
return nil, errors.New("secret_access_key not found")
}
auth := aws.Auth{AccessKey: accessKeyId, SecretKey: secretAccessKey}
// FIXME look through all the regions by name and use one of them if found
// Synthesize the region
s3Endpoint := fs.ConfigFile.MustValue(name, "endpoint")
if s3Endpoint == "" {
s3Endpoint = "https://s3.amazonaws.com/"
}
region := aws.Region{
Name: "s3",
S3Endpoint: s3Endpoint,
S3LocationConstraint: false,
}
s3LocationConstraint := fs.ConfigFile.MustValue(name, "location_constraint")
if s3LocationConstraint != "" {
region.Name = s3LocationConstraint
region.S3LocationConstraint = true
var auth *credentials.Credentials
switch {
case accessKeyID == "" && secretAccessKey == "":
fs.Debug(name, "Using anonymous access for S3")
auth = credentials.AnonymousCredentials
case accessKeyID == "":
return nil, nil, errors.New("access_key_id not found")
case secretAccessKey == "":
return nil, nil, errors.New("secret_access_key not found")
default:
auth = credentials.NewStaticCredentials(accessKeyID, secretAccessKey, "")
}
c := s3.New(auth, region)
return c, nil
endpoint := fs.ConfigFile.MustValue(name, "endpoint")
region := fs.ConfigFile.MustValue(name, "region")
if region == "" && endpoint == "" {
endpoint = "https://s3.amazonaws.com/"
}
if region == "" {
region = "us-east-1"
}
awsConfig := aws.NewConfig().
WithRegion(region).
WithMaxRetries(maxRetries).
WithCredentials(auth).
WithEndpoint(endpoint).
WithHTTPClient(fs.Config.Client()).
WithS3ForcePathStyle(true)
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
ses := session.New()
c := s3.New(ses, awsConfig)
if region == "other-v2-signature" {
fs.Debug(name, "Using v2 auth")
signer := func(req *request.Request) {
// Ignore AnonymousCredentials object
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
sign(accessKeyID, secretAccessKey, req.HTTPRequest)
}
c.Handlers.Sign.Clear()
c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
c.Handlers.Sign.PushBack(signer)
}
// Add user agent
c.Handlers.Build.PushBack(func(r *request.Request) {
r.HTTPRequest.Header.Set("User-Agent", fs.UserAgent)
})
return c, ses, nil
}
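// Editor's note: the "other-v2-signature" branch above swaps signing
// schemes by clearing the SDK's default (v4) Sign handler list,
// re-adding the content-length handler the signer depends on, and
// pushing a custom v2 signer; the sign() helper it calls is
// presumably defined elsewhere in this package (not shown in this
// diff).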
// NewFsS3 constructs an FsS3 from the path, bucket:path
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
bucket, directory, err := s3ParsePath(root)
if err != nil {
return nil, err
}
c, err := s3Connection(name)
c, ses, err := s3Connection(name)
if err != nil {
return nil, err
}
f := &FsS3{
f := &Fs{
name: name,
c: c,
bucket: bucket,
b: c.Bucket(bucket),
perm: s3.Private, // FIXME need user to specify
root: directory,
ses: ses,
// FIXME perm: s3.Private, // FIXME need user to specify
root: directory,
locationConstraint: fs.ConfigFile.MustValue(name, "location_constraint"),
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
_, err = f.b.Head(directory, nil)
req := s3.HeadObjectInput{
Bucket: &f.bucket,
Key: &directory,
}
_, err = f.c.HeadObject(&req)
if err == nil {
remote := path.Base(directory)
f.root = path.Dir(directory)
@@ -221,27 +287,28 @@ func NewFs(name, root string) (fs.Fs, error) {
return fs.NewLimited(f, obj), nil
}
}
// f.listMultipartUploads()
return f, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) NewFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
o := &FsObjectS3{
s3: f,
func (f *Fs) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
// Set info but not meta
var err error
o.lastModified, err = time.Parse(time.RFC3339, info.LastModified)
if err != nil {
fs.Log(o, "Failed to read last modified: %s", err)
if info.LastModified == nil {
fs.Log(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = *info.LastModified
}
o.etag = info.ETag
o.bytes = info.Size
o.etag = aws.StringValue(info.ETag)
o.bytes = aws.Int64Value(info.Size)
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
@@ -252,64 +319,93 @@ func (f *FsS3) NewFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
return o
}
// Return an FsObject from a path
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) NewFsObject(remote string) fs.Object {
return f.NewFsObjectWithInfo(remote, nil)
func (f *Fs) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
func (f *Fs) list(directories bool, fn func(string, *s3.Object)) {
maxKeys := int64(listChunkSize)
delimiter := ""
if directories {
delimiter = "/"
}
// FIXME need to implement ALL loop
objects, err := f.b.List(f.root, delimiter, "", 10000)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
} else {
rootLength := len(f.root)
if directories {
for _, remote := range objects.CommonPrefixes {
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote := remote[rootLength:]
fn(remote, &s3.Key{Key: remote})
}
var marker *string
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
Bucket: &f.bucket,
Delimiter: &delimiter,
Prefix: &f.root,
MaxKeys: &maxKeys,
Marker: marker,
}
resp, err := f.c.ListObjects(&req)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
break
} else {
for i := range objects.Contents {
object := &objects.Contents[i]
if !strings.HasPrefix(object.Key, f.root) {
fs.Log(f, "Odd name received %q", object.Key)
continue
rootLength := len(f.root)
if directories {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix.Prefix == nil {
fs.Log(f, "Nil common prefix received")
continue
}
remote := *commonPrefix.Prefix
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote = remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
fn(remote, &s3.Object{Key: &remote})
}
remote := object.Key[rootLength:]
fn(remote, object)
} else {
for _, object := range resp.Contents {
key := aws.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Log(f, "Odd name received %q", key)
continue
}
remote := key[rootLength:]
fn(remote, object)
}
}
if !aws.BoolValue(resp.IsTruncated) {
break
}
// Use NextMarker if set, otherwise use last Key
if resp.NextMarker == nil || *resp.NextMarker == "" {
marker = resp.Contents[len(resp.Contents)-1].Key
} else {
marker = resp.NextMarker
}
}
}
}
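// Editor's note: a minimal standalone sketch of the pagination
// pattern list() uses above, written against the aws-sdk-go client
// this file already imports. NextMarker is only populated when a
// Delimiter is supplied, hence the fall-back to the last Key of the
// page.
func listAllKeysSketch(c *s3.S3, bucket string) error {
	var marker *string
	for {
		resp, err := c.ListObjects(&s3.ListObjectsInput{
			Bucket: aws.String(bucket),
			Marker: marker,
		})
		if err != nil {
			return err
		}
		for _, obj := range resp.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		if !aws.BoolValue(resp.IsTruncated) || len(resp.Contents) == 0 {
			return nil
		}
		if resp.NextMarker != nil && *resp.NextMarker != "" {
			marker = resp.NextMarker
		} else {
			marker = resp.Contents[len(resp.Contents)-1].Key
		}
	}
}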
// Walk the path returning a channel of FsObjects
func (f *FsS3) List() fs.ObjectsChan {
// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
if f.bucket == "" {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
} else {
go func() {
defer close(out)
f.list(false, func(remote string, object *s3.Key) {
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
f.list(false, func(remote string, object *s3.Object) {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
})
@@ -318,22 +414,23 @@ func (f *FsS3) List() fs.ObjectsChan {
return out
}
// Lists the buckets
func (f *FsS3) ListDir() fs.DirChan {
// ListDir lists the buckets
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
if f.bucket == "" {
// List the buckets
go func() {
defer close(out)
buckets, err := f.c.ListBuckets()
req := s3.ListBucketsInput{}
resp, err := f.c.ListBuckets(&req)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list buckets: %s", err)
fs.ErrorLog(f, "Couldn't list buckets: %s", err)
} else {
for _, bucket := range buckets {
for _, bucket := range resp.Buckets {
out <- &fs.Dir{
Name: bucket.Name,
When: bucket.CreationDate,
Name: aws.StringValue(bucket.Name),
When: aws.TimeValue(bucket.CreationDate),
Bytes: -1,
Count: -1,
}
@@ -344,10 +441,14 @@ func (f *FsS3) ListDir() fs.DirChan {
// List the directories in the path in the bucket
go func() {
defer close(out)
f.list(true, func(remote string, object *s3.Key) {
f.list(true, func(remote string, object *s3.Object) {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
out <- &fs.Dir{
Name: remote,
Bytes: object.Size,
Bytes: size,
Count: 0,
}
})
@@ -357,74 +458,136 @@ func (f *FsS3) ListDir() fs.DirChan {
}
// Put the FsObject into the bucket
func (f *FsS3) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
fs := &FsObjectS3{s3: f, remote: remote}
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: remote,
}
return fs, fs.Update(in, modTime, size)
}
// Mkdir creates the bucket if it doesn't exist
func (f *FsS3) Mkdir() error {
err := f.b.PutBucket(f.perm)
if err, ok := err.(*s3.Error); ok {
if err.Code == "BucketAlreadyOwnedByYou" {
func (f *Fs) Mkdir() error {
req := s3.CreateBucketInput{
Bucket: &f.bucket,
ACL: &f.perm,
}
if f.locationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
LocationConstraint: &f.locationConstraint,
}
}
_, err := f.c.CreateBucket(&req)
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
return nil
}
}
return err
}
// Rmdir deletes the bucket
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *FsS3) Rmdir() error {
return f.b.DelBucket()
func (f *Fs) Rmdir() error {
if f.root != "" {
return nil
}
req := s3.DeleteBucketInput{
Bucket: &f.bucket,
}
_, err := f.c.DeleteBucket(&req)
return err
}
// Return the precision
func (f *FsS3) Precision() time.Duration {
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.fs
key := f.root + remote
source := srcFs.bucket + "/" + srcFs.root + srcObj.remote
req := s3.CopyObjectInput{
Bucket: &f.bucket,
Key: &key,
CopySource: &source,
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
}
_, err := f.c.CopyObject(&req)
if err != nil {
return nil, err
}
return f.NewFsObject(remote), err
}
// ------------------------------------------------------------
// Return the parent Fs
func (o *FsObjectS3) Fs() fs.Fs {
return o.s3
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
return o.fs
}
// Return a string version
func (o *FsObjectS3) String() string {
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Return the remote path
func (o *FsObjectS3) Remote() string {
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectS3) Md5sum() (string, error) {
return strings.Trim(strings.ToLower(o.etag), `"`), nil
func (o *Object) Md5sum() (string, error) {
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
// fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
return "", nil
}
return etag, nil
}
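// Editor's note: the regexp guard above exists because multipart
// uploads give S3 objects ETags of the form "<hex>-<part count>",
// which are not MD5 sums of the content; those fail the 32-hex
// match, so Md5sum reports "" and callers skip the checksum
// comparison rather than flag a false mismatch.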
// Size returns the size of an object in bytes
func (o *FsObjectS3) Size() int64 {
func (o *Object) Size() int64 {
return o.bytes
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *FsObjectS3) readMetaData() (err error) {
func (o *Object) readMetaData() (err error) {
if o.meta != nil {
return nil
}
headers, err := o.s3.b.Head(o.s3.root+o.remote, nil)
key := o.fs.root + o.remote
req := s3.HeadObjectInput{
Bucket: &o.fs.bucket,
Key: &key,
}
resp, err := o.fs.c.HeadObject(&req)
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
@@ -432,19 +595,17 @@ func (o *FsObjectS3) readMetaData() (err error) {
var size int64
// Ignore missing Content-Length assuming it is 0
// Some versions of ceph do this due their apache proxies
if contentLength, ok := headers["Content-Length"]; ok {
size, err = strconv.ParseInt(contentLength, 10, 64)
if err != nil {
fs.Debug(o, "Failed to read size from: %q", headers)
return err
}
if resp.ContentLength != nil {
size = *resp.ContentLength
}
o.etag = headers["Etag"]
o.etag = aws.StringValue(resp.ETag)
o.bytes = size
o.meta = headers
if o.lastModified, err = time.Parse(http.TimeFormat, headers["Last-Modified"]); err != nil {
o.meta = resp.Metadata
if resp.LastModified == nil {
fs.Log(o, "Failed to read last modified from HEAD: %s", err)
o.lastModified = time.Now()
} else {
o.lastModified = *resp.LastModified
}
return nil
}
@@ -453,7 +614,7 @@ func (o *FsObjectS3) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectS3) ModTime() time.Time {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
@@ -461,11 +622,11 @@ func (o *FsObjectS3) ModTime() time.Time {
}
// read mtime out of metadata if available
d, ok := o.meta[metaMtime]
if !ok {
if !ok || d == nil {
// fs.Debug(o, "No metadata")
return o.lastModified
}
modTime, err := swift.FloatStringToTime(d)
modTime, err := swift.FloatStringToTime(*d)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %s", err)
return o.lastModified
@@ -473,55 +634,105 @@ func (o *FsObjectS3) ModTime() time.Time {
return modTime
}
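// Editor's note: the mtime round-trip relies on the swift helpers
// imported above - swift.TimeToFloatString writes the time as a
// unix-epoch float string (e.g. "1446821234.123456789") under the
// "Mtime" metadata key, and swift.FloatStringToTime parses it back
// with nanosecond precision, which is why Precision() above returns
// time.Nanosecond.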
// Sets the modification time of the local fs object
func (o *FsObjectS3) SetModTime(modTime time.Time) {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read metadata: %s", err)
fs.ErrorLog(o, "Failed to read metadata: %s", err)
return
}
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
_, err = o.s3.b.Update(o.s3.root+o.remote, o.s3.perm, o.meta)
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
// Copy the object to itself to update the metadata
key := o.fs.root + o.remote
sourceKey := o.fs.bucket + "/" + key
directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
req := s3.CopyObjectInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.perm,
Key: &key,
CopySource: &sourceKey,
Metadata: o.meta,
MetadataDirective: &directive,
}
_, err = o.fs.c.CopyObject(&req)
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
}
}
// Is this object storable
func (o *FsObjectS3) Storable() bool {
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *FsObjectS3) Open() (in io.ReadCloser, err error) {
in, err = o.s3.b.GetReader(o.s3.root + o.remote)
return
func (o *Object) Open() (in io.ReadCloser, err error) {
key := o.fs.root + o.remote
req := s3.GetObjectInput{
Bucket: &o.fs.bucket,
Key: &key,
}
resp, err := o.fs.c.GetObject(&req)
if err != nil {
return nil, err
}
return resp.Body, nil
}
// Update the Object from in with modTime and size
func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
// Set the mtime in the headers
headers := s3.Headers{
metaMtime: swift.TimeToFloatString(modTime),
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
u.Concurrency = 2
u.LeavePartsOnError = false
u.S3 = o.fs.c
})
// Set the mtime in the meta data
metadata := map[string]*string{
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
}
// Guess the content type
contentType := mime.TypeByExtension(path.Ext(o.remote))
if contentType == "" {
contentType = "application/octet-stream"
contentType := fs.MimeType(o)
key := o.fs.root + o.remote
req := s3manager.UploadInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.perm,
Key: &key,
Body: in,
ContentType: &contentType,
Metadata: metadata,
//ContentLength: &size,
}
_, err := uploader.Upload(&req)
if err != nil {
return err
}
_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, contentType, o.s3.perm, headers)
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData()
return err
}
// Remove an object
func (o *FsObjectS3) Remove() error {
return o.s3.b.Del(o.s3.root + o.remote)
func (o *Object) Remove() error {
key := o.fs.root + o.remote
req := s3.DeleteObjectInput{
Bucket: &o.fs.bucket,
Key: &key,
}
_, err := o.fs.c.DeleteObject(&req)
return err
}
// Check the interfaces are satisfied
var _ fs.Fs = &FsS3{}
var _ fs.Object = &FsObjectS3{}
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
)

Some files were not shown because too many files have changed in this diff.