mirror of https://github.com/rclone/rclone.git synced 2026-02-09 21:20:10 +00:00

Compare commits


182 Commits

Author SHA1 Message Date
Nick Craig-Wood
bb75d80d33 Fix frontmatter 2016-04-18 18:55:07 +01:00
Nick Craig-Wood
157d7d45f5 Version v1.29 2016-04-18 18:30:29 +01:00
Nick Craig-Wood
b5cba73cc3 Make test more reliable 2016-04-18 17:48:52 +01:00
Nick Craig-Wood
dd36264aad Add FAQ All my uploaded docx/xlsx/pptx files appear as archive/zip
Fixes #417
2016-04-12 21:41:24 +01:00
Nick Craig-Wood
ddb47758f3 drive: increase default chunk size to 8 MB and document - fixes #397 2016-04-12 21:33:55 +01:00
Nick Craig-Wood
9539bbf78a Fix appveyor build after vet removal from tools repo 2016-04-07 20:07:00 +01:00
Nick Craig-Wood
0f8e7c3843 Make rclone check obey the --size-only flag - fixes #419 2016-04-07 15:01:45 +01:00
Nick Craig-Wood
b835330714 Use "application/octet-stream" if mime.TypeByExtension returns invalid type
Fixes #424
2016-04-07 14:32:01 +01:00
Nick Craig-Wood
310db14ed6 Notes on --transfers and B2 2016-04-04 17:58:36 +01:00
Klaus Post
7f2e9d9a6b Require go v1.5 for compilation
Google cloud package requires go v1.5 to compile, so we need to require the same for rclone.

Fixes #408
2016-04-04 17:34:39 +01:00
Nick Craig-Wood
6cc9c09610 drive: preserve mime type on file update - fixes #417 2016-04-04 16:58:42 +01:00
Nick Craig-Wood
93c60c34e1 b2: Fix incorrect value of Precision - should be 1ms not 1s 2016-03-24 15:23:27 +00:00
Klaus Post
02c11dd4a7 Don't de-reference swift connection
The connection object contains a mutex, so it is good practice not to dereference it to a value.

Reported by Go tip "go vet".
2016-03-23 17:09:05 +00:00
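
A minimal sketch of the problem (hypothetical `Conn` type, not the actual swift package): a struct containing a mutex must be passed by pointer, since dereferencing it to a value copies the lock, which `go vet`'s copylocks check flags.

package main

import "sync"

// Conn stands in for the swift connection type: it contains a mutex,
// so values of this type must not be copied once in use.
type Conn struct {
	mu    sync.Mutex
	calls int
}

// use takes a pointer, never a value, so the mutex is shared.
func use(c *Conn) {
	c.mu.Lock()
	c.calls++
	c.mu.Unlock()
}

func main() {
	c := &Conn{}
	use(c)
	// bad := *c // copying the value would copy the mutex; `go vet` flags this
}
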
Klaus Post
40dc575aa4 Update Travis CI
- Only use golint if version is > Go 1.4
- Add Go 1.6 and tip as test targets.
2016-03-23 17:07:26 +00:00
Klaus Post
f8101771c9 Disable keepalive to keep server from serving stale results.
Fixes issue #402

Bonus fix: Fix "multiple header writes" warning when no code is received.
2016-03-23 16:57:56 +00:00
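
For reference, the `net/http` knob involved is `Server.SetKeepAlivesEnabled`. A minimal sketch of a short-lived local server with keep-alives off (address and handler are illustrative, not rclone's actual code):

package main

import (
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:        "127.0.0.1:53682",
		Handler:     http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),
		ReadTimeout: 10 * time.Second,
	}
	// Without this a client can hold a keep-alive connection open and
	// keep getting answers from a server that should already be done.
	srv.SetKeepAlivesEnabled(false)
	_ = srv.ListenAndServe()
}
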
Klaus Post
8f4d6973fb Fix missing "quit" option when there are no remotes. 2016-03-23 16:57:56 +00:00
Nick Craig-Wood
ced3a4bc19 Implement -I, --ignore-times for unconditional upload - fixes #311 2016-03-22 17:02:27 +00:00
Nick Craig-Wood
cb22583212 b2: Enable mod time syncing - fixes #348 2016-03-22 15:56:44 +00:00
Nick Craig-Wood
414b35ea56 Change the interface of SetModTime to return an error - #348 2016-03-22 15:56:44 +00:00
Nick Craig-Wood
f469905d07 dropbox: Note 10,000 files limitation on purge - fixes #374 2016-03-22 14:46:43 +00:00
Nick Craig-Wood
20f4b2c91d b2: update API to new version - fixes #393
* Make reading mod time and SHA1 much more efficient
    * removes an HTTP transaction to increase speed
  * Reduce memory usage of the objects
2016-03-22 14:39:56 +00:00
Nick Craig-Wood
37543bd1d9 b2: Fix parsing of mod time when not in metadata
This fixes the error `Failed to parse mod time string "":
"src_last_modified_millis" not found in metadata`.
2016-03-22 10:26:37 +00:00
Nick Craig-Wood
0dc0052e93 Note that filters must use / not \ - #394 2016-03-19 17:40:54 +00:00
Nick Craig-Wood
bd27473762 swift: Don't return an MD5SUM for static large objects - #392
* rename isManifest to isDynamicLargeObject for clarity
2016-03-17 17:36:20 +00:00
Nick Craig-Wood
9dccf91da7 swift/hubic: document segmented object MD5SUM limitations - fixes #392 2016-03-16 17:39:44 +00:00
Nick Craig-Wood
a1323eb204 s3: Fix uploading files bigger than 50GB - fixes #386 2016-03-10 16:48:55 +00:00
Klaus Post
e57c4406f3 Add mutex to "warned" map.
Fixes #385
2016-03-10 15:51:56 +01:00
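
The underlying issue is that Go maps are not safe for concurrent writes, so a map shared between goroutines needs a lock. A sketch of the pattern with hypothetical names (`warned` and `warnOnce` are not rclone's actual identifiers):

package main

import (
	"fmt"
	"sync"
)

var (
	warnedMu sync.Mutex
	warned   = map[string]bool{}
)

// warnOnce prints a warning the first time it sees key; the mutex
// makes the check-and-set on the map atomic across goroutines.
func warnOnce(key, msg string) {
	warnedMu.Lock()
	defer warnedMu.Unlock()
	if !warned[key] {
		warned[key] = true
		fmt.Println(msg)
	}
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			warnOnce("mime", "warning: invalid mime type") // printed once
		}()
	}
	wg.Wait()
}
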
Nick Craig-Wood
fdd4b4ee22 drive: Add missing retries for Move and DirMove 2016-03-06 18:15:01 +00:00
Nick Craig-Wood
8ef551bf9c Make dedupe remove identical copies without asking and add non interactive mode - fixes #338
* Now removes identical copies without asking
  * Now obeys `--dry-run`
  * Implement `--dedupe-mode` for non interactive running
    * `--dedupe-mode interactive` - interactive, the default.
    * `--dedupe-mode skip` - removes identical files then skips anything left.
    * `--dedupe-mode first` - removes identical files then keeps the first one.
    * `--dedupe-mode newest` - removes identical files then keeps the newest one.
    * `--dedupe-mode oldest` - removes identical files then keeps the oldest one.
    * `--dedupe-mode rename` - removes identical files then renames the rest to be different.
  * Add tests which will only run on Google Drive.
2016-03-06 18:15:01 +00:00
Nick Craig-Wood
2119fb4314 drive: tweak pacer to speed up directory listings and make more reliable 2016-03-06 18:15:01 +00:00
Nick Craig-Wood
0166544319 Add Attack constant to pacer 2016-03-05 20:29:05 +00:00
Nick Craig-Wood
874a64e5f6 A script to make a directory hierarchy for testing 2016-03-05 20:26:15 +00:00
Nick Craig-Wood
e0c03a11ab Commit missing docs changes and adjust RELEASE.md to make sure it doesn't happen again 2016-03-01 17:42:27 +00:00
Nick Craig-Wood
3c7f80f58f Version v1.28 2016-03-01 09:00:01 +00:00
Nick Craig-Wood
229ea3f86c Stop --update tests running on remotes which don't do mod time 2016-03-01 07:26:33 +00:00
Nick Craig-Wood
41eb386063 Reset password/config path in config tests to fix other tests 2016-02-29 21:43:37 +00:00
Nick Craig-Wood
dfc7cd97a3 Optionally disable gzip compression on downloads with --no-gzip-encoding - fixes #353 2016-02-29 19:48:54 +00:00
Nick Craig-Wood
280ac26464 Implement -u/--update so creation times can be used on all remotes - #226 2016-02-29 17:46:40 +00:00
Nick Craig-Wood
88cca8a6eb Simplify literals (after running gofmt -s over the code) 2016-02-29 16:57:23 +00:00
Nick Craig-Wood
9c263e3e2b Commit missing tests 2016-02-28 20:25:51 +00:00
Nick Craig-Wood
7d4e143dee Make it obvious that the client secrets are encrypted 2016-02-28 19:57:19 +00:00
Nick Craig-Wood
3343c1afa4 Don't make directories if --dry-run set - fixes #342 2016-02-28 19:56:50 +00:00
Nick Craig-Wood
b279df2e67 Drive: disable copy and move for google docs - fixes #332 2016-02-28 09:35:28 +00:00
Nick Craig-Wood
e6f340d245 swift: Fix uploading of chunked files with non ascii characters - fixes #350 2016-02-27 18:59:16 +00:00
Nick Craig-Wood
bfc66cceaa Update b2 docs after temp file changes 2016-02-27 16:32:40 +00:00
Nick Craig-Wood
1105b6bd94 Add Jakub Gedeon to contributors 2016-02-27 13:58:00 +00:00
Jakub Gedeon
694d390710 s3: Check if directory exists during Mkdir
If you don't have privileges to create a bucket in S3 but it already
exists, don't fail with an auth error; detect that the mkdir was not
needed and return successfully.
2016-02-27 13:24:46 +00:00
Nick Craig-Wood
6b6b43402b b2: Use one upload URL per goroutine
This fixes `more than one upload using auth token` errors.
2016-02-27 13:00:35 +00:00
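
A sketch of the idea: keep a pool of upload URLs and hand each uploading goroutine its own, since B2 rejects concurrent uploads that share one auth token. Types and fields here are hypothetical, not rclone's actual code.

package main

import "sync"

// uploadURL pairs a B2 upload URL with its auth token.
type uploadURL struct {
	url, token string
}

// pool hands out upload URLs so that no two goroutines share one.
type pool struct {
	mu   sync.Mutex
	free []*uploadURL
}

func (p *pool) get() *uploadURL {
	p.mu.Lock()
	defer p.mu.Unlock()
	if n := len(p.free); n > 0 {
		u := p.free[n-1]
		p.free = p.free[:n-1]
		return u
	}
	return &uploadURL{} // a real pool would call b2_get_upload_url here
}

func (p *pool) put(u *uploadURL) {
	p.mu.Lock()
	p.free = append(p.free, u)
	p.mu.Unlock()
}

func main() {
	p := &pool{}
	u := p.get() // each uploading goroutine takes its own URL...
	p.put(u)     // ...and returns it when the upload finishes
}
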
Nick Craig-Wood
6f46270735 b2: Add pacing, retries and reauthentication - fixes #310 2016-02-27 12:04:45 +00:00
Nick Craig-Wood
ee5e34a19c b2: factor authorize account into its own method 2016-02-27 12:04:45 +00:00
Nick Craig-Wood
70902b4051 Make rest Set methods safe for concurrent calling 2016-02-27 12:04:45 +00:00
Nick Craig-Wood
f46304e8ae Update README from docs/content/about.md 2016-02-27 11:15:51 +00:00
Nick Craig-Wood
40252f0aa6 Make continuous integrations logs less noisy 2016-02-26 17:01:19 +00:00
Nick Craig-Wood
e7b9cc4705 Fix pacer tests 2016-02-26 16:59:52 +00:00
Nick Craig-Wood
867a26fe4f Implement --low-level-retries flag - fixes #266 2016-02-25 22:58:21 +00:00
Nick Craig-Wood
3890105cdc Add -run-only flag to run_all test 2016-02-25 22:05:57 +00:00
Nick Craig-Wood
d2219a800a Fix and document the move command - fixes #334
* Don't attempt to use server side Move unless they are on the same Fs
  * Fix move in the presence of filters
2016-02-25 20:05:34 +00:00
Nick Craig-Wood
ccb59480bd Add InActive method to Filter to detect when no filters are in use. 2016-02-25 19:58:00 +00:00
Nick Craig-Wood
b5c5209162 Fix redirecting stderr on unix-like OSes - fixes #363 2016-02-24 22:03:14 +00:00
Nick Craig-Wood
835b6761b7 Write about convmv in the docs for fixing non UTF-8 filesystems - fixes #300 2016-02-21 14:09:06 +00:00
Nick Craig-Wood
f30c836696 Note Linux version requirements for running rclone - fixes #346 2016-02-21 13:59:24 +00:00
Nick Craig-Wood
090ce00afc Clarify Dropbox docs on mod times - fixes #345 2016-02-21 13:52:00 +00:00
Nick Craig-Wood
377986d599 Update config walk throughs with new style choice menu 2016-02-21 13:40:16 +00:00
Nick Craig-Wood
95e4d837ef Make config chooser easier to understand 2016-02-21 13:40:16 +00:00
Nick Craig-Wood
e08e35984c Add help to remote chooser in rclone config - fixes #43 2016-02-21 13:40:16 +00:00
Nick Craig-Wood
a3b4c8a0f2 Add issue template for github 2016-02-21 10:32:44 +00:00
Nick Craig-Wood
700e47d6e2 Stub out ReadPassword on plan9 and solaris to fix compilation 2016-02-21 10:31:53 +00:00
Nick Craig-Wood
ea11f5ff3d Stop make beta remaking the docs 2016-02-21 10:29:48 +00:00
klauspost
758c7f2d84 Avoid b2 temporary file.
If the source can provide a SHA1 hash, we don't copy the input to a temporary file.

Fixes #358
2016-02-19 18:07:15 +00:00
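
The decision being described, as a simplified sketch (not rclone's actual code): B2 needs the SHA1 before the upload starts, so a source that already knows its hash can be streamed directly, while anything else is spooled to a temporary file and hashed on the way through.

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

func upload(in io.Reader, knownSHA1 string) error {
	if knownSHA1 != "" {
		fmt.Println("streaming directly, sha1 =", knownSHA1)
		_, err := io.Copy(io.Discard, in) // stand-in for the HTTP upload
		return err
	}
	// No hash available: spool to a temp file, hashing as we copy.
	tmp, err := os.CreateTemp("", "b2-upload-")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()
	h := sha1.New()
	if _, err := io.Copy(tmp, io.TeeReader(in, h)); err != nil {
		return err
	}
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		return err
	}
	fmt.Println("spooled to temp file, sha1 =", hex.EncodeToString(h.Sum(nil)))
	_, err = io.Copy(io.Discard, tmp) // now upload with the computed hash
	return err
}

func main() {
	_ = upload(strings.NewReader("hello"), "")
}
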
klauspost
ef06371c93 Create separate interface for object information.
Split the read-only information about an Fs into a separate struct to limit access.

See discussion at #282.
2016-02-19 13:31:09 +00:00
Nick Craig-Wood
85a0f25b95 b2: Fix reading metadata for all files when using a subdir - fixes #356
Also fix some confusion with Metadata prefix/root.
2016-02-19 12:11:30 +00:00
klauspost
84b00b362f Change back to original goconfig package.
Add documentation for `--ask-password`.
2016-02-17 11:45:05 +01:00
klauspost
bfd7601cf9 Add configuration file encryption
See #317 for details.

Use `rclone config` to add/change/remove password.

Tests that load the default configuration will now fail with a better error message. Also add a switch that makes it possible to disable password prompts and fail instead.

Make it possible to use the "RCLONE_CONFIG_PASS" environment variable as password for configuration.
2016-02-16 16:32:05 +01:00
Nick Craig-Wood
4676a89963 Note that you may need curl --insecure when fetching root CA certificates 2016-02-16 14:55:26 +00:00
Nick Craig-Wood
8cd3c25b41 Amazon Cloud Drive: retry on 400, 401, 408, 504 and EOF errors - fixes #340 2016-02-16 14:45:22 +00:00
Nick Craig-Wood
5f97603684 Fix fetch test dependencies too. 2016-02-15 17:31:11 +00:00
Nick Craig-Wood
f1debd4701 Fetch test dependencies too. 2016-02-15 17:20:26 +00:00
Nick Craig-Wood
1cd0d9a1f2 Fix listing drive docs at root - fixes #336
* Remove full drive list code
    * it is slower and uses more data
    * having two directory listing routines is causing problems (including this one)
    * less code is more
  * Make sure we don't recurse into directories we don't own
  * Fix export extension handling and add tests
2016-02-15 16:46:43 +00:00
Nick Craig-Wood
a6320bbad3 Fix delete command to wait until all finished - fixes missing deletes.
This also could affect deletes at the end of the sync command.
2016-02-15 16:43:59 +00:00
Nick Craig-Wood
b1dd8e998b Yandex Disk: Use http.Client passed in for all operations - fixes logging. 2016-02-15 16:43:18 +00:00
Xavier Lucas
c2e8f06bfa Swift storageUrl overloading fixes #167 2016-02-09 22:17:13 +00:00
Nick Craig-Wood
08a8f7174a Add Brian Stengaard to contributors 2016-02-09 21:45:51 +00:00
Nick Craig-Wood
ce4c1d4f35 s3: Fix empty checks in auth 2016-02-09 17:19:33 +00:00
Nick Craig-Wood
a0b9bd527e Add both forms of env var to the docs 2016-02-09 17:19:13 +00:00
Brian Stengaard
ce05ef7110 Add IAM role and Env credentials
This makes the s3 provider authentication logic:

  - Configured credentials if both key and secret are available
  - Anonymous if key and secret are missing and env_auth is not set
  - If env_auth is set to a truthy value (https://golang.org/pkg/strconv/#ParseBool):
    - AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY environment variables
    - IAM role credentials as fallback
2016-02-09 16:32:36 +00:00
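
A sketch of that decision order in plain Go (illustrative only; the real code goes through the AWS SDK's credential providers):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// authMode mirrors the order above: explicit keys win, then env_auth
// selects the environment/IAM chain, otherwise access is anonymous.
func authMode(accessKey, secretKey, envAuth string) string {
	switch {
	case accessKey != "" && secretKey != "":
		return "static credentials"
	case isTruthy(envAuth):
		if os.Getenv("AWS_ACCESS_KEY_ID") != "" && os.Getenv("AWS_SECRET_ACCESS_KEY") != "" {
			return "environment variables"
		}
		return "IAM role"
	default:
		return "anonymous"
	}
}

func isTruthy(s string) bool {
	v, err := strconv.ParseBool(s) // accepts 1, t, T, TRUE, true, True...
	return err == nil && v
}

func main() {
	fmt.Println(authMode("", "", "true"))
}
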
Werner Beroux
6a47d966a4 Update filtering documentation - fixes #306
Explains that filtering is done relative to the remote root.

Also removes a section that is more about internal knowledge and is
likely to confuse people. Instead adds a section giving an overview
of how to perform filtering before going into the details.
2016-02-09 16:25:19 +00:00
Nick Craig-Wood
85d99de26b Fix typo in error strings 2016-02-09 16:15:50 +00:00
Nick Craig-Wood
4a82251c62 Add man page to repository too (missed from #256) 2016-02-07 20:26:10 +00:00
Nick Craig-Wood
e62c0a58a7 Version 1.27 2016-01-31 17:50:13 +00:00
Nick Craig-Wood
1f3e48f18f Add manuals to repository - fixes #256 2016-01-31 16:34:30 +00:00
Nick Craig-Wood
bbbe11790b Update docs to make syncing from a directory more obvious - fixes #302 2016-01-31 16:27:19 +00:00
Nick Craig-Wood
13edf62824 Document rclone return codes - fixes #308 2016-01-31 16:15:25 +00:00
Nick Craig-Wood
558bc2e132 drive: Export Google documents - fixes #49
Rclone will download one format of a Google doc. The choice of
export format is controlled by the `--drive-formats` flag.
2016-01-31 16:10:43 +00:00
Nick Craig-Wood
0f73129ab7 dedupe command to deduplicate a remote. Useful with google drive - fixes #41 2016-01-31 16:09:42 +00:00
Nick Craig-Wood
1373efaa39 Delete command which does obey the filters - fixes #327 2016-01-31 16:06:04 +00:00
Nick Craig-Wood
5c37b777fc Make the --dry-run warnings into logs so they appear without the -v flag 2016-01-31 16:06:04 +00:00
Nick Craig-Wood
d4df3f2154 acd: Download files >= 9GB with their tempLink direct from s3
This fixes the problem downloading files > 10GB.

Fixes #204 Fixes #313
2016-01-30 18:08:44 +00:00
Nick Craig-Wood
8ae424c5a3 Emphasize testing sync with --dry-run and -v 2016-01-29 07:59:33 +00:00
Nick Craig-Wood
cae19df058 s3: URL escape CopySource
This fixes metadata update and copy for files with `+` in their names.

Fixes #315
2016-01-27 17:39:33 +00:00
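
The gist of the fix: S3 URL-decodes the `x-amz-copy-source` header, so a literal `+` in a key must be sent as `%2B` or it comes back as a space. One way to build the value (a sketch, not necessarily rclone's exact code):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	bucket, key := "mybucket", "dir/file+name.txt"
	// QueryEscape turns "+" into "%2B" so S3 decodes it back to "+".
	copySource := url.QueryEscape(bucket + "/" + key)
	fmt.Println(copySource) // mybucket%2Fdir%2Ffile%2Bname.txt
}
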
Nick Craig-Wood
8c211fc8df Warn the user about files with same name but different case
Relates to #107 & #119.
2016-01-26 16:57:09 +00:00
Nick Craig-Wood
74a71f7824 Add tests for --delete-before, --delete-during and --delete-after 2016-01-26 16:57:09 +00:00
Nick Craig-Wood
12b51c5eb8 Remove duplicate check for filter IncludeObject 2016-01-26 16:57:09 +00:00
klauspost
14069fd8e6 Implement --delete-before, --delete-during, --delete-after - fixes #252. 2016-01-26 16:57:09 +00:00
Nick Craig-Wood
cd62f41606 Reduce number of logs and show hash type where appropriate 2016-01-24 18:06:57 +00:00
Nick Craig-Wood
109d4ee490 Prefix all test remotes with rclone-test- and make names more pronounceable 2016-01-24 12:37:46 +00:00
Nick Craig-Wood
18ebec8276 Check remote is empty between integration tests 2016-01-24 12:37:19 +00:00
Nick Craig-Wood
c47b4f828f acd: Fix deadlock in directory traversal code 2016-01-24 11:20:55 +00:00
Nick Craig-Wood
c3a0c0c451 swift: Fix upload from unprivileged user - fixes #273 2016-01-23 20:32:53 +00:00
Nick Craig-Wood
6cb0de43ce Deprecate compiling with go1.3 2016-01-23 17:27:00 +00:00
Nick Craig-Wood
83f0d3e03d acd: remove 409 conflict from error codes we will retry
This should fix the very long pauses, or getting stuck entirely, that
people have seen in uploads.
2016-01-23 17:02:09 +00:00
Nick Craig-Wood
eda4130703 Fix integration tests so they can be run independently and out of order - fixes #291
* Make all integration tests start with an empty remote
  * Add an -individual flag so this can be a different bucket/container/directory
  * Fix up tests after changing the hashers
  * Add sha1sum test
  * Make directory checking in tests sleep more to fix acd inconsistencies
  * Factor integration tests to make more maintainable
  * Ensure remote writes have a fstest.CheckItems() before use
    * this fixes eventual consistency on the directory listings later
  * Call fs.Stats.ResetCounters() before every fs.Sync()

Note that the tests shouldn't be run concurrently as fs.Config is global state.
2016-01-23 17:02:09 +00:00
Nick Craig-Wood
ccba859812 Test all available hashes for each remote 2016-01-23 09:10:36 +00:00
Nick Craig-Wood
de3cf5e8d7 Add -verbose flag to unit tests and add some more eventual consistency retries 2016-01-20 20:06:05 +00:00
Nick Craig-Wood
ce305321b6 amazon cloud drive: Fix "Next token is expired" - Fixes #289 Fixes #263
This should also fix the consequent "409 Conflict" name already exists errors.
2016-01-20 20:05:52 +00:00
Nick Craig-Wood
e6117e978e Add Werner Beroux to contributors 2016-01-20 16:33:28 +00:00
Werner Beroux
4b40898743 Update filtering.md
Clarify by removing the extension, which is confusing if you're not careful.
2016-01-20 16:16:24 +01:00
Nick Craig-Wood
ae3a0ec27e b2: Don't re-read the SHA1 if we already have it 2016-01-19 08:21:20 +00:00
Nick Craig-Wood
d9458fb4ee b2: return error in Hash from readFileMetadata operation 2016-01-19 08:21:10 +00:00
Nick Craig-Wood
27f67edb1a Fix formatting problem in sha1sum 2016-01-17 13:56:42 +00:00
Nick Craig-Wood
3ffea738e6 Make hash constants start from 1 not 2 2016-01-17 10:47:24 +00:00
Nick Craig-Wood
a63dd6020c onedrive: fix incorrectly decoded SHA-1 2016-01-17 10:46:36 +00:00
Nick Craig-Wood
d0678bc3e5 local: report error on stat in Hash in case file disappeared 2016-01-17 10:46:19 +00:00
klauspost
ce04a073ef Update templates to changes in the latest hugo version
Fixes #295
2016-01-16 14:11:52 +00:00
Nick Craig-Wood
c337a367f3 Make make serve fail if make website would fail 2016-01-16 14:10:57 +00:00
klauspost
7ae40cb352 Update information on revised hash functionality. 2016-01-16 10:17:11 +00:00
Nick Craig-Wood
e8daab7971 Fix integration tests for remotes with unsupported hash schemes 2016-01-16 09:45:15 +00:00
klauspost
78c3a5ccfa Add support for multiple hash types.
Add support for multiple hash types with negotiation of common hash types for comparison.

Manually rebased version of #277 (see discussion there)
2016-01-11 13:39:33 +01:00
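
The negotiation can be pictured as a bitmask intersection: each remote advertises the hash types it supports, and comparison uses a hash both sides share. A sketch with stand-in types (rclone's real HashType/HashSet live in the fs package):

package main

import "fmt"

// Each hash type is one bit; a set of them is a bitmask.
type HashType uint

const (
	HashMD5 HashType = 1 << iota
	HashSHA1
)

type HashSet uint

// Overlap returns the hash types both sets support.
func (s HashSet) Overlap(t HashSet) HashSet { return s & t }

func main() {
	local := HashSet(HashMD5 | HashSHA1) // the local fs supports both
	b2 := HashSet(HashSHA1)              // b2 only has SHA1
	common := local.Overlap(b2)
	fmt.Println(common&HashSet(HashSHA1) != 0) // true: compare by SHA1
}
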
Nick Craig-Wood
2142c75846 Add missing docs for options - fixes #278 2016-01-10 12:04:20 +00:00
Nick Craig-Wood
c724d8f614 dropbox: Make file exclusion error controllable with -q #287 2016-01-10 11:49:04 +00:00
Nick Craig-Wood
af5f4ee724 Make --include rules add their implicit exclude * at the end of the filter list
This means you can mix `--include` and `--include-from` with the
other filters (eg `--exclude`) but you must include all the files you
want in the include statement.

Fixes #280
2016-01-10 11:42:53 +00:00
Nick Craig-Wood
01aa4394a6 Explain that errored sync doesn't delete files - fixes #285 2016-01-10 10:44:33 +00:00
Nick Craig-Wood
2646519712 Add --memprofile flag 2016-01-09 15:25:48 +00:00
Nick Craig-Wood
5b2efd563a Add Xavier Lucas to contributors 2016-01-08 08:32:52 +00:00
xlucas
e7b7432079 OVH Swift authentication endpoint 2016-01-08 08:30:13 +00:00
Nick Craig-Wood
ea2ef4443b Remove -verbose from errcheck 2016-01-08 08:20:04 +00:00
klauspost
25f22ec561 Add "--ignore-existing" flag.
Add option to completely ignore existing files and not consider them for transfer.

Fixes #274
2016-01-08 08:20:04 +00:00
Nick Craig-Wood
5189231a34 Tweaks to rclone authorize
* Document the headless / remote setup procedure
  * Move Config constants into fs
  * Parse arguments in main for Authorize
2016-01-07 20:31:23 +00:00
klauspost
bcbd30bb8a Add easier headless configuration.
This allows setting up a remote on a headless machine by copying and pasting values, including pasting a token into the configuration.

Generating the token requires rclone to be run on a machine with a proper browser. Custom client ids and secrets are supported.

To test token generation, use `rclone auth "fs type"`.
2016-01-07 20:31:23 +00:00
Nick Craig-Wood
c245183101 Stop errcheck running for go < 1.5 2016-01-07 16:37:51 +00:00
klauspost
4ce2a84df0 Document workaround for ACD maximum file size.
Document workaround for ACD maximum file size and display a warning in verbose mode before upload starts.

Fixes #215.
2016-01-05 17:12:16 +00:00
klauspost
3c31d711b3 Add local file system option to disable UNC on Windows.
This will add an option to disable UNC conversion on Windows to deal with buggy file system implementations like EncFS.

Fixes #261
2016-01-05 17:08:11 +00:00
Nick Craig-Wood
3f5d8390ba Add Björn Harrtell to contributors 2016-01-05 17:05:31 +00:00
Björn Harrtell
78edafcaac drive: add --drive-auth-owner-only to only consider files owned by the user. 2016-01-05 17:02:04 +00:00
Nick Craig-Wood
1ce3673006 Add -clean flag to test_all.go to clean left over test directories 2016-01-03 21:49:26 +00:00
Nick Craig-Wood
3423de65fa Make canonical place for all fs in fs/all/all.go 2016-01-03 14:12:45 +00:00
Nick Craig-Wood
0c81439bc3 Fix upload_github target 2016-01-02 12:18:32 +00:00
Nick Craig-Wood
77fb8ac240 Version 1.26 2016-01-02 12:04:32 +00:00
Nick Craig-Wood
979dfb8cc6 Add Joseph Spurrier to contributors 2016-01-02 11:50:49 +00:00
Joseph Spurrier
fe0289f2f5 s3: Fix corrupting Content-Type on mod time update
This fixes an issue where updating the modification time resets the
content-type to the S3 default of binary/octet-stream which breaks
static websites that expect an html file to have a content-type of
text/html.
2016-01-02 11:47:52 +00:00
Nick Craig-Wood
6a64567dd7 Add Dmitry Burdeev (dibu) to contributors 2016-01-02 11:45:30 +00:00
Nick Craig-Wood
8de8cd62ca yandex: stop create folder error being fatal 2015-12-30 21:07:42 +00:00
Nick Craig-Wood
cba27d2920 yandex: correct precision to 1ns 2015-12-30 20:47:44 +00:00
Nick Craig-Wood
9ade179407 yandex: Fix socket leaks 2015-12-30 14:30:16 +00:00
Nick Craig-Wood
82b85431bd yandex: Make it use our http client so logging, bwlimit etc works properly 2015-12-30 14:30:16 +00:00
Nick Craig-Wood
98778b1870 Docs for Yandex 2015-12-30 14:30:16 +00:00
Nick Craig-Wood
dfd46c23f9 Fix forgotten update for test_all.go 2015-12-30 12:12:24 +00:00
dibu28
3ac4407b88 Implement Yandex storage backend - fixes #234 2015-12-30 12:11:46 +00:00
Nick Craig-Wood
8ea0d5212f Add -verbose flag to test_all and fix tries count 2015-12-30 11:34:22 +00:00
Nick Craig-Wood
acd350d833 Add retry for eventual consistency in findObject test 2015-12-30 10:46:04 +00:00
Nick Craig-Wood
2f4b9f619d Add C. Bess to contributors 2015-12-30 10:13:11 +00:00
C. Bess
70efd0274c Add Contributing link to readme 2015-12-30 10:10:53 +00:00
Nick Craig-Wood
33b3eea6ec Implement Backblaze B2 - fixes #224 2015-12-30 10:05:07 +00:00
Nick Craig-Wood
113624691a Add -dump-headers and -dump-bodies flags for operations test debugging 2015-12-30 09:35:35 +00:00
Nick Craig-Wood
afaec1a2e9 Use test logger instead of log for test output 2015-12-30 09:35:25 +00:00
Nick Craig-Wood
ddf39f2d57 Replace test_all.sh with test_all.go which is cross platform and parallel 2015-12-30 09:26:34 +00:00
Nick Craig-Wood
2df5d95d70 Documentation for --min-age and --max-age 2015-12-29 19:34:10 +00:00
Nick Craig-Wood
64a808ac76 Add CONTRIBUTING file 2015-12-29 19:23:20 +00:00
Nick Craig-Wood
05dc7183cb onedrive: Don't mask HTTP error codes with JSON decode error 2015-12-28 15:15:12 +00:00
Nick Craig-Wood
e69e181090 Fix --min-age and --max-age when only one is present 2015-12-17 14:22:43 +00:00
Nick Craig-Wood
a1269fa669 Make sure we use bash as our shell 2015-12-17 13:30:58 +00:00
Nick Craig-Wood
8369b5209f swift: Make sure we read the size for 0 length files - Fixes #237
This was causing a problem with sync for chunked files.  The directory
listing would read their size back as 0, see that the size had
changed, and immediately resync them.
2015-12-17 13:30:58 +00:00
Nick Craig-Wood
2aa3c0a2af make beta announces destination URL 2015-12-17 13:30:58 +00:00
Nick Craig-Wood
ac65d8369e Make fs.CheckClose public to stop duplication 2015-12-17 13:30:58 +00:00
Nick Craig-Wood
7a24532224 Factor REST library out of onedrive 2015-12-17 13:30:58 +00:00
Nick Craig-Wood
8057d668bb Fix crash in http logging - fixes #223
A nil-pointer exception was caused if the http transaction ever
resulted in a go error while using `--dump-bodies`.  Now the error is
not ignored, and is logged in place of the http body.
2015-12-17 13:30:58 +00:00
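
The shape of the fix, sketched with the stock net/http/httputil helper (illustrative, not rclone's actual logging code):

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
)

// logRequest dumps the outgoing request; if dumping fails, the error
// is logged instead of blindly printing a nil dump.
func logRequest(req *http.Request, dumpBody bool) {
	dump, err := httputil.DumpRequestOut(req, dumpBody)
	if err != nil {
		log.Printf("dump request failed: %v", err)
		return
	}
	log.Printf(">>> %s", dump)
}

func main() {
	req, _ := http.NewRequest("GET", "http://example.com/", nil)
	logRequest(req, true)
}
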
Nick Craig-Wood
36f1bc4a8a Make ls/lsl/md5sum/size/check obey includes and excludes - fixes #169
* run check directory listings concurrently
2015-12-17 13:30:58 +00:00
Nick Craig-Wood
beb8098b0a Ignore current builds when uploading to github 2015-12-17 13:28:12 +00:00
Nick Craig-Wood
6e64a71382 Add Adriano Aurélio Meirelles to contributors 2015-12-17 13:28:12 +00:00
Adriano Aurélio Meirelles
3cbd57d9ad Add support to filter files based on their age 2015-12-17 09:52:38 -02:00
Nick Craig-Wood
4f50b26af0 Add missing cloud storage systems 2015-11-23 22:19:50 +00:00
Nick Craig-Wood
cb651b5866 Upload releases to github too - fixes #225 2015-11-23 22:18:21 +00:00
Nick Craig-Wood
3c1069c815 onedrive: re-enable server side copy 2015-11-22 11:04:16 +00:00
135 changed files with 23718 additions and 1535 deletions

.gitignore

@@ -4,7 +4,3 @@ rclone
rclonetest/rclonetest
build
docs/public
MANUAL.md
MANUAL.html
MANUAL.txt
rclone.1


@@ -7,19 +7,17 @@ os:
# - osx
go:
- 1.3.3
- 1.4.2
- 1.5.1
# - tip
- 1.5.3
- 1.6
- tip
install:
- go get ./...
- go get -t ./...
- go get -u github.com/kisielk/errcheck
- go get -u golang.org/x/tools/cmd/goimports
- go get -u github.com/golang/lint/golint
script:
- make check
- go test -v ./...
- go test -cpu=2 -race -v ./...
- go test ./...
- go test -cpu=2 -race ./...

CONTRIBUTING.md (new file)

@@ -0,0 +1,161 @@
# Contributing to rclone #
This is a short guide on how to contribute things to rclone.
## Reporting a bug ##
Bug reports are welcome. When submitting, please add:
* Rclone version (eg output from `rclone -V`)
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
* A log of the command with the `-v` flag (eg output from `rclone -v copy /tmp remote:tmp`)
* If the log contains secrets, edit it with a text editor first to obscure them
## Submitting a pull request ##
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via Github.
If it is a big feature then make an issue first so it can be discussed.
You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.
First, in your web browser, press the fork button on [rclone's Github
page](https://github.com/ncw/rclone).
Now in your terminal
go get github.com/ncw/rclone
cd $GOPATH/src/github.com/ncw/rclone
git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git
Make a branch to add your new feature
git checkout -b my-new-feature
And get hacking.
When ready - run the unit tests for the code you changed
go test -v
Note that you may need to make a test remote, eg `TestSwift` for some
of the unit tests.
Note the top level Makefile targets
* make check
* make test
Both of these will be run by Travis when you make a pull request but
you can do this yourself locally too.
Make sure you
* Add documentation for a new feature
* Add unit tests for a new feature
* squash commits down to one per feature
* rebase to master `git rebase master`
When you are done with that
git push origin my-new-feature
Go to the Github website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
Your patch will get reviewed and you might get asked to fix some things.
If so, make the changes in the same branch, squash the commits,
rebase to master, then push to Github with `--force`.
## Testing ##
rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
go test -v ./...
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
run against any of the backends. This is done by making specially
named remotes in the default config file.
If you wanted to test changes in the `drive` backend, then you would
need to make a remote called `TestDrive`.
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.
cd drive
go test -v
You can then run the integration tests which tests all of rclone's
operations. Normally these get run against the local file system,
but they can be run against any of the remotes.
cd ../fs
go test -v -remote TestDrive:
go test -v -remote TestDrive: -subdir
If you want to run all the integration tests against all the remotes,
then run in that directory
go run test_all.go
## Making a release ##
There are separate instructions for making a release in the RELEASE.md
file - doing the first few steps is useful before making a
contribution.
* go get -u -f -v ./...
* make check
* make test
* make tag
## Writing a new backend ##
Choose a name. The docs here will use `remote` as an example.
Note that in rclone terminology a file system backend is called a
remote or an fs.
Research
* Look at the interfaces defined in `fs/fs.go`
* Study one or more of the existing remotes
Getting going
* Create `remote/remote.go` (copy this from a similar fs)
* Add your fs to the imports in `fs/all/all.go`
Unit tests
* Create a config entry called `TestRemote` for the unit tests to use
* Add your fs to the end of `fstest/fstests/gen_tests.go`
* Generate `remote/remote_test.go` unit tests with `cd fstest/fstests; go generate`
* Make sure all tests pass with `go test -v`
Integration tests
* Add your fs to `fs/test_all.go`
* Make sure integration tests pass with
* `cd fs`
* `go test -v -remote TestRemote:` and
* `go test -v -remote TestRemote: -subdir`
Add your fs to the docs
* `README.md` - main Github page
* `docs/content/remote.md` - main docs page
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `make_manual.py` - add the page to the `docs` constant

ISSUE_TEMPLATE.md (new file)

@@ -0,0 +1,13 @@
When filing an issue, please include the following information if
possible as well as a description of the problem.
> What is your rclone version (eg output from `rclone -V`)
> Which OS you are using and how many bits (eg Windows 7, 64 bit)
> Which cloud storage system are you using? (eg Google Drive)
> The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
> A log from the command with the `-v` flag (eg output from `rclone -v copy /tmp remote:tmp`)

MANUAL.html (new file; diff suppressed because it is too large)

MANUAL.md (new file; diff suppressed because it is too large)

MANUAL.txt (new file; diff suppressed because it is too large)


@@ -9,7 +9,7 @@ rclone:
test: rclone
go test ./...
cd fs && ./test_all.sh
cd fs && go run test_all.go
check: rclone
go vet ./...
@@ -39,7 +39,7 @@ clean:
go clean ./...
find . -name \*~ | xargs -r rm -f
rm -rf build docs/public
rm -f rclone rclonetest/rclonetest rclone.1 MANUAL.md MANUAL.html MANUAL.txt
rm -f rclone rclonetest/rclonetest
website:
cd docs && hugo
@@ -50,18 +50,22 @@ upload_website: website
upload:
rclone -v copy build/ memstore:downloads-rclone-org
upload_github:
./upload-github $(TAG)
cross: doc
./cross-compile $(TAG)
beta: doc
beta:
./cross-compile $(TAG)β
rm build/*-current-*
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
@echo Beta release ready at http://pub.rclone.org/$(TAG)%CE%B2/
serve:
serve: website
cd docs && hugo server -v -w
tag:
tag: doc
@echo "Old tag is $(LAST_TAG)"
@echo "New tag is $(NEW_TAG)"
echo -e "package fs\n\n// Version of rclone\nconst Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go


@@ -2,6 +2,7 @@
[Website](http://rclone.org) |
[Documentation](http://rclone.org/docs/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](http://rclone.org/changelog/) |
[Installation](http://rclone.org/install/) |
[G+](https://google.com/+RcloneOrg)
@@ -17,17 +18,21 @@ Rclone is a command line program to sync files and directories to and from
* Dropbox
* Google Cloud Storage
* Amazon Cloud Drive
* Microsoft One Drive
* Hubic
* Backblaze B2
* Yandex Disk
* The local filesystem
Features
* MD5SUMs checked at all times for file integrity
* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync mode to make a directory identical
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.


@@ -8,11 +8,12 @@ Required software for making a release
* golint - go get github.com/golang/lint
Making a release
* go get -u -f -v ./...
* go get -t -u -f -v ./...
* make check
* make test
* make tag
* edit docs/content/changelog.md
* make doc
* git commit -a -v
* make retag
* # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross - not required for go >= 1.5
@@ -20,3 +21,4 @@ Making a release
* make upload
* make upload_website
* git push --tags origin master
* make upload_github


@@ -27,22 +27,26 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/pacer"
"github.com/spf13/pflag"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "amzn1.application-oa2-client.6bf18d2d1f5b485c94c8988bb03ad0e7"
rcloneClientSecret = "k8/NyszKm5vEkZXAwsbGkd6C3NrbjIqMg4qEhIeF14Szub2wur+/teS3ubXgsLe9//+tr/qoqK+lq6mg8vWkoA=="
folderKind = "FOLDER"
fileKind = "FILE"
assetKind = "ASSET"
statusAvailable = "AVAILABLE"
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
minSleep = 20 * time.Millisecond
rcloneClientID = "amzn1.application-oa2-client.6bf18d2d1f5b485c94c8988bb03ad0e7"
rcloneEncryptedClientSecret = "k8/NyszKm5vEkZXAwsbGkd6C3NrbjIqMg4qEhIeF14Szub2wur+/teS3ubXgsLe9//+tr/qoqK+lq6mg8vWkoA=="
folderKind = "FOLDER"
fileKind = "FILE"
assetKind = "ASSET"
statusAvailable = "AVAILABLE"
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
minSleep = 20 * time.Millisecond
warnFileSize = 50 << 30 // Display warning for files larger than this size
)
// Globals
var (
// Flags
tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
// Description of how to auth for this app
acdConfig = &oauth2.Config{
Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
@@ -51,39 +55,42 @@ var (
TokenURL: "https://api.amazon.com/auth/o2/token",
},
ClientID: rcloneClientID,
ClientSecret: fs.Reveal(rcloneClientSecret),
ClientSecret: fs.Reveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "amazon cloud drive",
NewFs: NewFs,
fs.Register(&fs.RegInfo{
Name: "amazon cloud drive",
Description: "Amazon Cloud Drive",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config(name, acdConfig)
err := oauthutil.Config("amazon cloud drive", name, acdConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: oauthutil.ConfigClientID,
Name: fs.ConfigClientID,
Help: "Amazon Application Client Id - leave blank normally.",
}, {
Name: oauthutil.ConfigClientSecret,
Name: fs.ConfigClientSecret,
Help: "Amazon Application Client Secret - leave blank normally.",
}},
})
pflag.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
}
// Fs represents a remote acd server
type Fs struct {
name string // name of this remote
c *acd.Client // the connection to the acd server
root string // the path we are working on
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // pacer for API calls
name string // name of this remote
c *acd.Client // the connection to the acd server
noAuthClient *http.Client // unauthenticated http client
root string // the path we are working on
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // pacer for API calls
}
// Object describes a acd object
@@ -123,15 +130,21 @@ func parsePath(path string) (root string) {
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
400, // Bad request (seen in "Next token is expired")
401, // Unauthorized (seen in "Token has expired")
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
409, // Conflict - happens in the unit tests a lot
503, // Service Unavailable
504, // Gateway Time-out
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
if err == io.EOF {
return true, err
}
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -146,10 +159,11 @@ func NewFs(name, root string) (fs.Fs, error) {
c := acd.NewClient(oAuthClient)
c.UserAgent = fs.UserAgent
f := &Fs{
name: name,
root: root,
c: c,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
name: name,
root: root,
c: c,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
noAuthClient: fs.Config.Client(),
}
// Update endpoints
@@ -246,7 +260,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
return "", false, err
}
if subFolder.Status != nil && *subFolder.Status != statusAvailable {
fs.Debug(f, "Ignoring folder %q in state %q", *subFolder.Status)
fs.Debug(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status)
time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
return "", false, nil
}
@@ -376,6 +390,84 @@ func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) err
return nil
}
// Path should be directory path either "" or "path/"
//
// List the directory using a recursive list from the root
//
// This fetches the minimum amount of stuff but does more API calls
// which makes it slow
func (f *Fs) listDirNonRecursive(dirID string, path string, out fs.ObjectsChan) error {
// Start some directory listing go routines
var wg sync.WaitGroup // sync closing of go routines
var traversing sync.WaitGroup // running directory traversals
type dirListJob struct {
dirID string
path string
}
in := make(chan dirListJob, fs.Config.Checkers)
errs := make(chan error, fs.Config.Checkers)
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for job := range in {
var jobs []dirListJob
fs.Debug(f, "Reading %q", job.path)
// Make the API request
_, err := f.listAll(job.dirID, "", false, false, func(node *acd.Node) bool {
// Recurse on directories
switch *node.Kind {
case folderKind:
jobs = append(jobs, dirListJob{dirID: *node.Id, path: job.path + *node.Name + "/"})
case fileKind:
if fs := f.newFsObjectWithInfo(job.path+*node.Name, node); fs != nil {
out <- fs
}
default:
// ignore ASSET etc
}
return false
})
fs.Debug(f, "Finished reading %q", job.path)
if err != nil {
fs.ErrorLog(f, "Error reading %s: %s", path, err)
errs <- err
}
// FIXME stop traversal on error?
traversing.Add(len(jobs))
go func() {
// Now we have traversed this directory, send these jobs off for traversal in
// the background
for _, job := range jobs {
in <- job
}
}()
traversing.Done()
}
}()
}
// Collect the errors
wg.Add(1)
var errResult error
go func() {
defer wg.Done()
for err := range errs {
errResult = err
}
}()
// Start the process
traversing.Add(1)
in <- dirListJob{dirID: dirID, path: path}
traversing.Wait()
close(in)
close(errs)
wg.Wait()
return errResult
}
// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
@@ -386,7 +478,7 @@ func (f *Fs) List() fs.ObjectsChan {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
err = f.listDirRecursive(f.dirCache.RootID(), "", out)
err = f.listDirNonRecursive(f.dirCache.RootID(), "", out)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "List failed: %s", err)
@@ -430,7 +522,9 @@ func (f *Fs) ListDir() fs.DirChan {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
// Temporary Object under construction
o := &Object{
fs: f,
@@ -440,11 +534,14 @@ func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs
if err != nil {
return nil, err
}
if size > warnFileSize {
fs.Debug(f, "Warning: file %q may fail because it is too big. Use --max-size=%dGB to skip large files.", remote, warnFileSize>>30)
}
folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
var info *acd.File
var resp *http.Response
err = f.pacer.CallNoRetry(func() (bool, error) {
if size != 0 {
if src.Size() != 0 {
info, resp, err = folder.Put(in, leaf)
} else {
info, resp, err = folder.PutSized(in, size, leaf)
@@ -529,6 +626,11 @@ func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
@@ -564,7 +666,7 @@ func (f *Fs) Purge() error {
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
func (o *Object) Fs() fs.Info {
return o.fs
}
@@ -581,8 +683,11 @@ func (o *Object) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
}
if o.info.ContentProperties.Md5 != nil {
return *o.info.ContentProperties.Md5, nil
}
@@ -640,9 +745,9 @@ func (o *Object) ModTime() time.Time {
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) error {
// FIXME not implemented
return
return fs.ErrorCantSetModTime
}
// Storable returns a boolean showing whether this object storable
@@ -652,10 +757,18 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open() (in io.ReadCloser, err error) {
bigObject := o.Size() >= int64(tempLinkThreshold)
if bigObject {
fs.Debug(o, "Dowloading large object via tempLink")
}
file := acd.File{Node: o.info}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
in, resp, err = file.Open()
if !bigObject {
in, resp, err = file.Open()
} else {
in, resp, err = file.OpenTempURL(o.fs.noAuthClient)
}
return shouldRetry(resp, err)
})
return in, err
@@ -664,7 +777,8 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
size := src.Size()
file := acd.File{Node: o.info}
var info *acd.File
var resp *http.Response


@@ -42,7 +42,7 @@ func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }


@@ -8,14 +8,13 @@ environment:
GOPATH: c:\gopath
install:
- go get golang.org/x/tools/cmd/vet
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go get -d ./...
- go get -t -d ./...
build_script:
- go vet ./...
- go test -v -cpu=2 ./...
- go test -cpu=2 ./...
- go test -cpu=2 -short -race ./...

b2/api/types.go (new file)

@@ -0,0 +1,151 @@
package api
import (
"fmt"
"strconv"
"time"
)
// Error describes a B2 error response
type Error struct {
Status int `json:"status"` // The numeric HTTP status code. Always matches the status in the HTTP response.
Code string `json:"code"` // A single-identifier code that identifies the error.
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
}
// Error satisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
}
// Account describes a B2 account
type Account struct {
ID string `json:"accountId"` // The identifier for the account.
}
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// Timestamp is a UTC time when this file was uploaded. It is a base
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
// fits in a 64 bit integer such as the type "long" in the programming
// language Java. It is intended to be compatible with Java's time
// long. For example, it can be passed directly into the java call
// Date.setTime(long time).
type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
timestamp := (*time.Time)(t).UTC().UnixNano()
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
}
// UnmarshalJSON turns JSON into a Timestamp
func (t *Timestamp) UnmarshalJSON(data []byte) error {
timestamp, err := strconv.ParseInt(string(data), 10, 64)
if err != nil {
return err
}
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6))
return nil
}
// File is info about a file
type File struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
Size int64 `json:"size"` // The number of bytes in the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
}
// ListBucketsResponse is as returned from the b2_list_buckets call
type ListBucketsResponse struct {
Buckets []Bucket `json:"buckets"`
}
// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
type ListFileNamesRequest struct {
BucketID string `json:"bucketId"` // required - The bucket to look for file names in.
StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this name will be returned.
MaxFileCount int `json:"maxFileCount,omitempty"` // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
StartFileID string `json:"startFileId,omitempty"` // optional - What to pass in to startFileId for the next search to continue where this one left off.
}
// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
type ListFileNamesResponse struct {
Files []File `json:"files"` // An array of objects, each one describing one file.
NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
NextFileID *string `json:"nextFileId"` // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
}
// GetUploadURLRequest is passed to b2_get_upload_url
type GetUploadURLRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
}
// GetUploadURLResponse is received from b2_get_upload_url
type GetUploadURLResponse struct {
BucketID string `json:"bucketId"` // The unique ID of the bucket.
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_file.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
}
// FileInfo is received from b2_upload_file and b2_get_file_info
type FileInfo struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
AccountID string `json:"accountId"` // Your account ID.
BucketID string `json:"bucketId"` // The bucket that the file is in.
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}
// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// DeleteBucketRequest is used to create a bucket
type DeleteBucketRequest struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
}
// DeleteFileRequest is used to delete a file version
type DeleteFileRequest struct {
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
Name string `json:"fileName"` // The name of this file.
}
// HideFileRequest is used to delete a file
type HideFileRequest struct {
BucketID string `json:"bucketId"` // The bucket containing the file to hide.
Name string `json:"fileName"` // The name of the file to hide.
}
// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
type GetFileInfoRequest struct {
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
}
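
To illustrate the Timestamp type above: B2 stores upload times as milliseconds since the Unix epoch, so 2001-02-03T04:05:10.123Z corresponds to 981173110123 (matching the test table later in this diff). A standalone sketch using the same arithmetic as MarshalJSON/UnmarshalJSON:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	t := time.Date(2001, 2, 3, 4, 5, 10, 123000000, time.UTC)
	ms := t.UnixNano() / 1e6 // nanoseconds -> milliseconds, as MarshalJSON does
	out, _ := json.Marshal(ms)
	fmt.Println(string(out)) // 981173110123

	// UnmarshalJSON reverses it: milliseconds back to a time.Time.
	back := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()
	fmt.Println(back.Equal(t)) // true
}
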

b2/b2.go (new file; diff suppressed because it is too large)

b2/b2_internal_test.go (new file)

@@ -0,0 +1,170 @@
package b2
import (
"testing"
"time"
"github.com/ncw/rclone/fstest"
)
// Test b2 string encoding
// https://www.backblaze.com/b2/docs/string_encoding.html
var encodeTest = []struct {
fullyEncoded string
minimallyEncoded string
plainText string
}{
{fullyEncoded: "%20", minimallyEncoded: "+", plainText: " "},
{fullyEncoded: "%21", minimallyEncoded: "!", plainText: "!"},
{fullyEncoded: "%22", minimallyEncoded: "%22", plainText: "\""},
{fullyEncoded: "%23", minimallyEncoded: "%23", plainText: "#"},
{fullyEncoded: "%24", minimallyEncoded: "$", plainText: "$"},
{fullyEncoded: "%25", minimallyEncoded: "%25", plainText: "%"},
{fullyEncoded: "%26", minimallyEncoded: "%26", plainText: "&"},
{fullyEncoded: "%27", minimallyEncoded: "'", plainText: "'"},
{fullyEncoded: "%28", minimallyEncoded: "(", plainText: "("},
{fullyEncoded: "%29", minimallyEncoded: ")", plainText: ")"},
{fullyEncoded: "%2A", minimallyEncoded: "*", plainText: "*"},
{fullyEncoded: "%2B", minimallyEncoded: "%2B", plainText: "+"},
{fullyEncoded: "%2C", minimallyEncoded: "%2C", plainText: ","},
{fullyEncoded: "%2D", minimallyEncoded: "-", plainText: "-"},
{fullyEncoded: "%2E", minimallyEncoded: ".", plainText: "."},
{fullyEncoded: "%2F", minimallyEncoded: "/", plainText: "/"},
{fullyEncoded: "%30", minimallyEncoded: "0", plainText: "0"},
{fullyEncoded: "%31", minimallyEncoded: "1", plainText: "1"},
{fullyEncoded: "%32", minimallyEncoded: "2", plainText: "2"},
{fullyEncoded: "%33", minimallyEncoded: "3", plainText: "3"},
{fullyEncoded: "%34", minimallyEncoded: "4", plainText: "4"},
{fullyEncoded: "%35", minimallyEncoded: "5", plainText: "5"},
{fullyEncoded: "%36", minimallyEncoded: "6", plainText: "6"},
{fullyEncoded: "%37", minimallyEncoded: "7", plainText: "7"},
{fullyEncoded: "%38", minimallyEncoded: "8", plainText: "8"},
{fullyEncoded: "%39", minimallyEncoded: "9", plainText: "9"},
{fullyEncoded: "%3A", minimallyEncoded: ":", plainText: ":"},
{fullyEncoded: "%3B", minimallyEncoded: ";", plainText: ";"},
{fullyEncoded: "%3C", minimallyEncoded: "%3C", plainText: "<"},
{fullyEncoded: "%3D", minimallyEncoded: "=", plainText: "="},
{fullyEncoded: "%3E", minimallyEncoded: "%3E", plainText: ">"},
{fullyEncoded: "%3F", minimallyEncoded: "%3F", plainText: "?"},
{fullyEncoded: "%40", minimallyEncoded: "@", plainText: "@"},
{fullyEncoded: "%41", minimallyEncoded: "A", plainText: "A"},
{fullyEncoded: "%42", minimallyEncoded: "B", plainText: "B"},
{fullyEncoded: "%43", minimallyEncoded: "C", plainText: "C"},
{fullyEncoded: "%44", minimallyEncoded: "D", plainText: "D"},
{fullyEncoded: "%45", minimallyEncoded: "E", plainText: "E"},
{fullyEncoded: "%46", minimallyEncoded: "F", plainText: "F"},
{fullyEncoded: "%47", minimallyEncoded: "G", plainText: "G"},
{fullyEncoded: "%48", minimallyEncoded: "H", plainText: "H"},
{fullyEncoded: "%49", minimallyEncoded: "I", plainText: "I"},
{fullyEncoded: "%4A", minimallyEncoded: "J", plainText: "J"},
{fullyEncoded: "%4B", minimallyEncoded: "K", plainText: "K"},
{fullyEncoded: "%4C", minimallyEncoded: "L", plainText: "L"},
{fullyEncoded: "%4D", minimallyEncoded: "M", plainText: "M"},
{fullyEncoded: "%4E", minimallyEncoded: "N", plainText: "N"},
{fullyEncoded: "%4F", minimallyEncoded: "O", plainText: "O"},
{fullyEncoded: "%50", minimallyEncoded: "P", plainText: "P"},
{fullyEncoded: "%51", minimallyEncoded: "Q", plainText: "Q"},
{fullyEncoded: "%52", minimallyEncoded: "R", plainText: "R"},
{fullyEncoded: "%53", minimallyEncoded: "S", plainText: "S"},
{fullyEncoded: "%54", minimallyEncoded: "T", plainText: "T"},
{fullyEncoded: "%55", minimallyEncoded: "U", plainText: "U"},
{fullyEncoded: "%56", minimallyEncoded: "V", plainText: "V"},
{fullyEncoded: "%57", minimallyEncoded: "W", plainText: "W"},
{fullyEncoded: "%58", minimallyEncoded: "X", plainText: "X"},
{fullyEncoded: "%59", minimallyEncoded: "Y", plainText: "Y"},
{fullyEncoded: "%5A", minimallyEncoded: "Z", plainText: "Z"},
{fullyEncoded: "%5B", minimallyEncoded: "%5B", plainText: "["},
{fullyEncoded: "%5C", minimallyEncoded: "%5C", plainText: "\\"},
{fullyEncoded: "%5D", minimallyEncoded: "%5D", plainText: "]"},
{fullyEncoded: "%5E", minimallyEncoded: "%5E", plainText: "^"},
{fullyEncoded: "%5F", minimallyEncoded: "_", plainText: "_"},
{fullyEncoded: "%60", minimallyEncoded: "%60", plainText: "`"},
{fullyEncoded: "%61", minimallyEncoded: "a", plainText: "a"},
{fullyEncoded: "%62", minimallyEncoded: "b", plainText: "b"},
{fullyEncoded: "%63", minimallyEncoded: "c", plainText: "c"},
{fullyEncoded: "%64", minimallyEncoded: "d", plainText: "d"},
{fullyEncoded: "%65", minimallyEncoded: "e", plainText: "e"},
{fullyEncoded: "%66", minimallyEncoded: "f", plainText: "f"},
{fullyEncoded: "%67", minimallyEncoded: "g", plainText: "g"},
{fullyEncoded: "%68", minimallyEncoded: "h", plainText: "h"},
{fullyEncoded: "%69", minimallyEncoded: "i", plainText: "i"},
{fullyEncoded: "%6A", minimallyEncoded: "j", plainText: "j"},
{fullyEncoded: "%6B", minimallyEncoded: "k", plainText: "k"},
{fullyEncoded: "%6C", minimallyEncoded: "l", plainText: "l"},
{fullyEncoded: "%6D", minimallyEncoded: "m", plainText: "m"},
{fullyEncoded: "%6E", minimallyEncoded: "n", plainText: "n"},
{fullyEncoded: "%6F", minimallyEncoded: "o", plainText: "o"},
{fullyEncoded: "%70", minimallyEncoded: "p", plainText: "p"},
{fullyEncoded: "%71", minimallyEncoded: "q", plainText: "q"},
{fullyEncoded: "%72", minimallyEncoded: "r", plainText: "r"},
{fullyEncoded: "%73", minimallyEncoded: "s", plainText: "s"},
{fullyEncoded: "%74", minimallyEncoded: "t", plainText: "t"},
{fullyEncoded: "%75", minimallyEncoded: "u", plainText: "u"},
{fullyEncoded: "%76", minimallyEncoded: "v", plainText: "v"},
{fullyEncoded: "%77", minimallyEncoded: "w", plainText: "w"},
{fullyEncoded: "%78", minimallyEncoded: "x", plainText: "x"},
{fullyEncoded: "%79", minimallyEncoded: "y", plainText: "y"},
{fullyEncoded: "%7A", minimallyEncoded: "z", plainText: "z"},
{fullyEncoded: "%7B", minimallyEncoded: "%7B", plainText: "{"},
{fullyEncoded: "%7C", minimallyEncoded: "%7C", plainText: "|"},
{fullyEncoded: "%7D", minimallyEncoded: "%7D", plainText: "}"},
{fullyEncoded: "%7E", minimallyEncoded: "~", plainText: "~"},
{fullyEncoded: "%7F", minimallyEncoded: "%7F", plainText: "\u007f"},
{fullyEncoded: "%E8%87%AA%E7%94%B1", minimallyEncoded: "%E8%87%AA%E7%94%B1", plainText: "自由"},
{fullyEncoded: "%F0%90%90%80", minimallyEncoded: "%F0%90%90%80", plainText: "𐐀"},
}
func TestUrlEncode(t *testing.T) {
	for _, test := range encodeTest {
		got := urlEncode(test.plainText)
		if got != test.minimallyEncoded && got != test.fullyEncoded {
			t.Errorf("urlEncode(%q) got %q wanted %q or %q", test.plainText, got, test.minimallyEncoded, test.fullyEncoded)
		}
	}
}
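
// For reference, a minimal sketch of an urlEncode that satisfies the
// table above - an illustration, not the b2 package's actual source.
// The safe set below is derived from the test data (an assumption
// about the full set); bytes and fmt imports are assumed.
var noNeedToEncode = func() (a [256]bool) {
	for _, c := range "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'()*-./:;=@_~" {
		a[c] = true
	}
	return a
}()

func urlEncode(in string) string {
	var out bytes.Buffer
	for i := 0; i < len(in); i++ {
		if c := in[i]; noNeedToEncode[c] {
			out.WriteByte(c) // emit the byte as-is
		} else {
			fmt.Fprintf(&out, "%%%02X", c) // percent-encode everything else
		}
	}
	return out.String()
}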
func TestTimeString(t *testing.T) {
	for _, test := range []struct {
		in   time.Time
		want string
	}{
		{fstest.Time("1970-01-01T00:00:00.000000000Z"), "0"},
		{fstest.Time("2001-02-03T04:05:10.123123123Z"), "981173110123"},
		{fstest.Time("2001-02-03T05:05:10.123123123+01:00"), "981173110123"},
	} {
		got := timeString(test.in)
		if test.want != got {
			t.Errorf("%v: want %v got %v", test.in, test.want, got)
		}
	}
}
func TestParseTimeString(t *testing.T) {
	for _, test := range []struct {
		in        string
		want      time.Time
		wantError string
	}{
		{"0", fstest.Time("1970-01-01T00:00:00.000000000Z"), ""},
		{"981173110123", fstest.Time("2001-02-03T04:05:10.123000000Z"), ""},
		{"", time.Time{}, ""},
		{"potato", time.Time{}, `strconv.ParseInt: parsing "potato": invalid syntax`},
	} {
		o := Object{}
		err := o.parseTimeString(test.in)
		got := o.modTime
		var gotError string
		if err != nil {
			gotError = err.Error()
		}
		if test.want != got {
			t.Errorf("%v: want %v got %v", test.in, test.want, got)
		}
		if test.wantError != gotError {
			t.Errorf("%v: want error %v got error %v", test.in, test.wantError, gotError)
		}
	}
}
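
// Sketches of the two functions the tests above exercise, matching
// the behaviour the test data implies - the real implementations live
// in the b2 package; strconv and time imports assumed.

// timeString returns modTime as milliseconds since the Unix epoch,
// rendered as a decimal string.
func timeString(modTime time.Time) string {
	return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
}

// parseTimeString sets o.modTime from a millisecond timestamp string.
// An empty string is deliberately not an error - it just leaves the
// modification time unset.
func (o *Object) parseTimeString(s string) error {
	if s == "" {
		return nil
	}
	ms, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return err
	}
	o.modTime = time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()
	return nil
}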

b2/b2_test.go Normal file

@@ -0,0 +1,56 @@
// Test B2 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package b2_test
import (
	"testing"

	"github.com/ncw/rclone/b2"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func init() {
	fstests.NilObject = fs.Object((*b2.Object)(nil))
	fstests.RemoteName = "TestB2:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }


@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash
set -e


@@ -21,17 +21,19 @@ Rclone is a command line program to sync files and directories to and from
* Amazon Cloud Drive
* Microsoft One Drive
* Hubic
* Backblaze B2
* Yandex Disk
* The local filesystem
Features
* MD5SUMs checked at all times for file integrity
* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync mode to make a directory identical
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
Links


@@ -27,16 +27,31 @@ d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) drive
3) dropbox
4) google cloud storage
5) local
6) s3
7) swift
type> 1
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Amazon Cloud Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Dropbox
\ "dropbox"
5 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
6 / Google Drive
\ "drive"
7 / Hubic
\ "hubic"
8 / Local Disk
\ "local"
9 / Microsoft OneDrive
\ "onedrive"
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
11 / Yandex Disk
\ "yandex"
Storage> 1
Amazon Application Client Id - leave blank normally.
client_id>
Amazon Application Client Secret - leave blank normally.
@@ -58,6 +73,9 @@ d) Delete this remote
y/e/d> y
```
See the [remote setup docs](/remote_setup/) for how to set it up on a
machine with no Internet browser available.
Note that rclone runs a webserver on your local machine to collect the
token as returned from Amazon. This only runs from the moment it
opens your browser to the moment you get back the verification
@@ -93,6 +111,22 @@ don't provide an API to permanently delete files, nor to empty the
trash, so you will have to do that with one of Amazon's apps or via
the Amazon cloud drive website.
### Specific options ###
Here are the command line options specific to this cloud storage
system.
#### --acd-templink-threshold=SIZE ####
Files this size or more will be downloaded via their `tempLink`. This
is to work around a problem with Amazon Cloud Drive which blocks
downloads of files bigger than about 10GB. The default for this is
9GB which shouldn't need to be changed.
To download files above this threshold, rclone requests a `tempLink`
which downloads the file through a temporary URL directly from the
underlying S3 storage.
### Limitations ###
Note that Amazon cloud drive is case insensitive so you can't have a
@@ -102,3 +136,15 @@ Amazon cloud drive has rate limiting so you may notice errors in the
sync (429 errors). rclone will automatically retry the sync up to 3
times by default (see `--retries` flag) which should hopefully work
around this problem.
Amazon cloud drive has an internal limit on the size of files that can
be uploaded to the service. This limit is not officially published,
but all files larger than it will fail.
At the time of writing (Jan 2016) the limit is in the area of 50GB per
file, so larger files are likely to fail.
Unfortunately there is no way for rclone to see that this failure is
because of file size, so it will retry the operation as it would any
other failure. To avoid this problem, use the `--max-size=50GB` option
to limit the maximum size of uploaded files.


@@ -18,3 +18,12 @@ Contributors
* Colin Nicholson <colin@colinn.com>
* Klaus Post <klauspost@gmail.com>
* Sergey Tolmachev <tolsi.ru@gmail.com>
* Adriano Aurélio Meirelles <adriano@atinge.com>
* C. Bess <cbess@users.noreply.github.com>
* Dmitry Burdeev <dibu28@gmail.com>
* Joseph Spurrier <github@josephspurrier.com>
* Björn Harrtell <bjorn@wololo.org>
* Xavier Lucas <xavier.lucas@corp.ovh.com>
* Werner Beroux <werner@beroux.com>
* Brian Stengaard <brian@stengaard.eu>
* Jakub Gedeon <jgedeon@sofi.com>

docs/content/b2.md Normal file

@@ -0,0 +1,137 @@
---
title: "B2"
description: "Backblaze B2"
date: "2015-12-29"
---
<i class="fa fa-fire"></i>Backblaze B2
----------------------------------------
B2 is [Backblaze's cloud storage system](https://www.backblaze.com/b2/).
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
command.) You may put subdirectories in too, eg `remote:bucket/path/to/dir`.
Here is an example of making a b2 configuration. First run
rclone config
This will guide you through an interactive setup process. You will
need your account number (a short hex number) and key (a long hex
number) which you can get from the b2 control panel.
```
No remotes found - make a new one
n) New remote
q) Quit config
n/q> n
name> remote
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Amazon Cloud Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Dropbox
\ "dropbox"
5 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
6 / Google Drive
\ "drive"
7 / Hubic
\ "hubic"
8 / Local Disk
\ "local"
9 / Microsoft OneDrive
\ "onedrive"
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
11 / Yandex Disk
\ "yandex"
Storage> 3
Account ID
account> 123456789abc
Application Key
key> 0123456789abcdef0123456789abcdef0123456789
Endpoint for the service - leave blank normally.
endpoint>
Remote config
--------------------
[remote]
account = 123456789abc
key = 0123456789abcdef0123456789abcdef0123456789
endpoint =
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
This remote is called `remote` and can now be used like this
See all buckets
rclone lsd remote:
Make a new bucket
rclone mkdir remote:bucket
List the contents of a bucket
rclone ls remote:bucket
Sync `/home/local/directory` to the remote bucket, deleting any
excess files in the bucket.
rclone sync /home/local/directory remote:bucket
### Modified time ###
The modified time is stored as metadata on the object as
`X-Bz-Info-src_last_modified_millis` as milliseconds since 1970-01-01
in the Backblaze standard. Other tools should be able to use this as
a modified time.
Modified times are used in syncing and are fully supported except in
the case of updating a modification time on an existing object. In
this case the object will be uploaded again as B2 doesn't have an API
method to set the modification time independent of doing an upload.
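For example, a tool uploading via the B2 API directly could preserve a
file's modification time by setting the same key (a sketch; `req` is
an assumed `*http.Request` for the upload call, and `strconv`/`time`
imports are assumed):
```go
millis := strconv.FormatInt(modTime.UnixNano()/1e6, 10) // ms since 1970-01-01
req.Header.Set("X-Bz-Info-src_last_modified_millis", millis)
```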
### SHA1 checksums ###
The SHA1 checksums of the files are checked on upload and download and
will be used in the syncing process. You can use the `--checksum` flag.
### Versions ###
When rclone uploads a new version of a file it creates a [new version
of it](https://www.backblaze.com/b2/docs/file_versions.html).
Likewise when you delete a file, the old version will still be
available.
The old versions of files are visible in the B2 web interface, but not
via rclone yet.
Rclone doesn't provide any way of managing old versions (downloading
them or deleting them) at the moment. When you `purge` a bucket, all
the old versions will be deleted.
### Transfers ###
Backblaze recommends that you do lots of transfers simultaneously for
maximum speed. In tests from my SSD equipped laptop the optimum
setting is about `--transfers 32` though higher numbers may be used
for a slight speed improvement. The optimum number for you may vary
depending on your hardware, how big the files are, how much you want
to load your computer, etc. The default of `--transfers 4` is
definitely too low for Backblaze B2 though.
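For example, to sync with 32 parallel transfers
rclone sync --transfers 32 /home/local/directory remote:bucket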
### API ###
Here are [some notes I made on the backblaze
API](https://gist.github.com/ncw/166dabf352b399f1cc1c) while
integrating it with rclone which detail the changes I'd like to see.


@@ -1,12 +1,119 @@
---
title: "Documentation"
description: "Rclone Changelog"
date: "2015-11-07"
date: "2016-04-18"
---
Changelog
---------
* v1.29 - 2016-04-18
* New Features
* Implement `-I, --ignore-times` for unconditional upload
* Improve `dedupe` command
* Now removes identical copies without asking
* Now obeys `--dry-run`
* Implement `--dedupe-mode` for non interactive running
* `--dedupe-mode interactive` - interactive, the default.
* `--dedupe-mode skip` - removes identical files then skips anything left.
* `--dedupe-mode first` - removes identical files then keeps the first one.
* `--dedupe-mode newest` - removes identical files then keeps the newest one.
* `--dedupe-mode oldest` - removes identical files then keeps the oldest one.
* `--dedupe-mode rename` - removes identical files then renames the rest to be different.
* Bug fixes
* Make rclone check obey the `--size-only` flag.
* Use "application/octet-stream" if discovered mime type is invalid.
* Fix missing "quit" option when there are no remotes.
* Google Drive
* Increase default chunk size to 8 MB - increases upload speed of big files
* Speed up directory listings and make more reliable
* Add missing retries for Move and DirMove - increases reliability
* Preserve mime type on file update
* Backblaze B2
* Enable mod time syncing
* This means that B2 will now check modification times
* It will upload new files to update the modification times
* (there isn't an API to just set the mod time.)
* If you want the old behaviour use `--size-only`.
* Update API to new version
* Fix parsing of mod time when not in metadata
* Swift/Hubic
* Don't return an MD5SUM for static large objects
* S3
* Fix uploading files bigger than 50GB
* v1.28 - 2016-03-01
* New Features
* Configuration file encryption - thanks Klaus Post
* Improve `rclone config` adding more help and making it easier to understand
* Implement `-u`/`--update` so creation times can be used on all remotes
* Implement `--low-level-retries` flag
* Optionally disable gzip compression on downloads with `--no-gzip-encoding`
* Bug fixes
* Don't make directories if `--dry-run` set
* Fix and document the `move` command
* Fix redirecting stderr on unix-like OSes when using `--log-file`
* Fix `delete` command to wait until all finished - fixes missing deletes.
* Backblaze B2
* Use one upload URL per goroutine - fixes `more than one upload using auth token`
* Add pacing, retries and reauthentication - fixes token expiry problems
* Upload without using a temporary file from local (and remotes which support SHA1)
* Fix reading metadata for all files when it shouldn't have been
* Drive
* Fix listing drive documents at root
* Disable copy and move for Google docs
* Swift
* Fix uploading of chunked files with non ASCII characters
* Allow setting of `storage_url` in the config - thanks Xavier Lucas
* S3
* Allow IAM role and credentials from environment variables - thanks Brian Stengaard
* Allow low privilege users to use S3 (check if directory exists during Mkdir) - thanks Jakub Gedeon
* Amazon Cloud Drive
* Retry on more things to make directory listings more reliable
* v1.27 - 2016-01-31
* New Features
* Easier headless configuration with `rclone authorize`
* Add support for multiple hash types - we now check SHA1 as well as MD5 hashes.
* `delete` command which does obey the filters (unlike `purge`)
* `dedupe` command to deduplicate a remote. Useful with Google Drive.
* Add `--ignore-existing` flag to skip all files that exist on destination.
* Add `--delete-before`, `--delete-during`, `--delete-after` flags.
* Add `--memprofile` flag to debug memory use.
* Warn the user about files with same name but different case
* Make `--include` rules add their implicit exclude * at the end of the filter list
* Deprecate compiling with go1.3
* Amazon Cloud Drive
* Fix download of files > 10 GB
* Fix directory traversal ("Next token is expired") for large directory listings
* Remove 409 conflict from error codes we will retry - stops very long pauses
* Backblaze B2
* SHA1 hashes now checked by rclone core
* Drive
* Add `--drive-auth-owner-only` to only consider files owned by the user - thanks Björn Harrtell
* Export Google documents
* Dropbox
* Make file exclusion error controllable with -q
* Swift
* Fix upload from unprivileged user.
* S3
* Fix updating of mod times of files with `+` in.
* Local
* Add local file system option to disable UNC on Windows.
* v1.26 - 2016-01-02
* New Features
* Yandex storage backend - thank you Dmitry Burdeev ("dibu")
* Implement Backblaze B2 storage backend
* Add --min-age and --max-age flags - thank you Adriano Aurélio Meirelles
* Make ls/lsl/md5sum/size/check obey includes and excludes
* Fixes
* Fix crash in http logging
* Upload releases to github too
* Swift
* Fix sync for chunked files
* One Drive
* Re-enable server side copy
* Don't mask HTTP error codes with JSON decode error
* S3
* Fix corrupting Content-Type on mod time update (thanks Joseph Spurrier)
* v1.25 - 2015-11-14
* New features
* Implement Hubic storage system


@@ -25,6 +25,11 @@ See the following for detailed instructions for
* [Dropbox](/dropbox/)
* [Google Cloud Storage](/googlecloudstorage/)
* [Local filesystem](/local/)
* [Amazon Cloud Drive](/amazonclouddrive/)
* [Backblaze B2](/b2/)
* [Hubic](/hubic/)
* [Microsoft One Drive](/onedrive/)
* [Yandex Disk](/yandex/)
Usage
-----
@@ -50,13 +55,74 @@ Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.
Note that it is always the contents of the directory that is synced,
not the directory so when source:path is a directory, it's the
contents of source:path that are copied, not the directory name and
contents.
If dest:path doesn't exist, it is created and the source:path contents
go there.
For example
rclone copy source:sourcepath dest:destpath
Let's say there are two files in sourcepath
sourcepath/one.txt
sourcepath/two.txt
This copies them to
destpath/one.txt
destpath/two.txt
Not to
destpath/sourcepath/one.txt
destpath/sourcepath/two.txt
If you are familiar with `rsync`, rclone always works as if you had
written a trailing / - meaning "copy the contents of this directory".
This applies to all commands and whether you are talking about the
source or destination.
### rclone sync source:path dest:path ###
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the `--dry-run` flag.
source, including deleting files if necessary.
**Important**: Since this can cause data loss, test first with the
`--dry-run` flag to see exactly what would be copied and deleted.
Note that files in the destination won't be deleted if there were any
errors at any point.
It is always the contents of the directory that is synced, not the
directory so when source:path is a directory, it's the contents of
source:path that are copied, not the directory name and contents. See
extended explanation in the `copy` command above if unsure.
If dest:path doesn't exist, it is created and the source:path contents
go there.
### move source:path dest:path ###
Moves the source to the destination.
If there are no filters in use this is equivalent to a copy followed
by a purge, but may use server side operations to speed it up if
possible.
If filters are in use then it is equivalent to a copy followed by
delete, followed by an rmdir (which only removes the directory if
empty). The individual files will be moved with server side
operations if possible.
**Important**: Since this can cause data loss, test first with the
--dry-run flag.
### rclone ls remote:path ###
@@ -76,6 +142,11 @@ size and path.
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.
### rclone sha1sum remote:path ###
Produces an sha1sum file for all the objects in the path. This
is in the same format as the standard sha1sum tool produces.
### rclone size remote:path ###
Prints the total size of objects in remote:path and the number of
@@ -92,7 +163,28 @@ objects in it, use purge for that.
### rclone purge remote:path ###
Remove the path and all of its contents.
Remove the path and all of its contents. Note that this does not obey
include/exclude filters - everything will be removed. Use `delete` if
you want to selectively delete files.
### rclone delete remote:path ###
Remove the contents of path. Unlike `purge` it obeys include/exclude
filters so can be used to selectively delete files.
Eg delete all files bigger than 100MBytes
Check what would be deleted first (use either)
rclone --min-size 100M lsl remote:path
rclone --dry-run --min-size 100M delete remote:path
Then delete
rclone --min-size 100M delete remote:path
That reads "delete everything with a minimum size of 100 MB", hence
delete all files bigger than 100MBytes.
### rclone check source:path dest:path ###
@@ -100,6 +192,87 @@ Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
`--size-only` may be used to only compare the sizes, not the MD5SUMs.
### rclone dedupe remote:path ###
By default `dedupe` interactively finds duplicate files and offers to
delete all but one or rename them to be different. Only useful with
Google Drive which can have duplicate file names.
The `dedupe` command will delete all but one of any identical (same
md5sum) files it finds without confirmation. This means that for most
duplicated files the `dedupe` command will not be interactive. You
can use `--dry-run` to see what would happen without doing anything.
Here is an example run.
Before - with duplicates
```
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:11.775000000 one.txt
564374 2016-03-05 16:23:06.731000000 one.txt
6048320 2016-03-05 16:18:26.092000000 one.txt
6048320 2016-03-05 16:22:46.185000000 two.txt
1744073 2016-03-05 16:22:38.104000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
```
Now the `dedupe` session
```
$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
one.txt: Found 4 duplicates - deleting identical copies
one.txt: Deleting 2/3 identical duplicates (md5sum "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 duplicates - deleting identical copies
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, md5sum 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> r
two-1.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt
```
The result being
```
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
564374 2016-03-05 16:22:52.118000000 two-1.txt
6048320 2016-03-05 16:22:46.185000000 two-2.txt
1744073 2016-03-05 16:22:38.104000000 two-3.txt
```
Dedupe can be run non interactively using the `--dedupe-mode` flag.
* `--dedupe-mode interactive` - interactive as above.
* `--dedupe-mode skip` - removes identical files then skips anything left.
* `--dedupe-mode first` - removes identical files then keeps the first one.
* `--dedupe-mode newest` - removes identical files then keeps the newest one.
* `--dedupe-mode oldest` - removes identical files then keeps the oldest one.
* `--dedupe-mode rename` - removes identical files then renames the rest to be different.
For example to rename all the identically named photos in your Google Photos directory, do
rclone dedupe --dedupe-mode rename "drive:Google Photos"
### rclone config ###
Enter an interactive configuration session.
@@ -176,11 +349,15 @@ The default is to run 8 checkers in parallel.
Normally rclone will look at modification time and size of files to
see if they are equal. If you set this flag then rclone will check
MD5SUM and size to determine if files are equal.
the file hash and size to determine if files are equal.
This is useful when the remote doesn't support setting modified time
and a more accurate sync is desired than just checking the file size.
This is very useful when transferring between remotes which store the
MD5SUM on the object which include swift, s3, drive, and google cloud
storage.
same hash type on the object, eg Drive and Swift. For details of which
remotes support which hash type see the table in the [overview
section](/overview/).
Eg `rclone --checksum sync s3:/bucket swift:/bucket` would run much
quicker than without the `--checksum` flag.
@@ -205,11 +382,33 @@ The connection timeout is the amount of time rclone will wait for a
connection to go through to a remote object storage system. It is
`1m` by default.
### --dedupe-mode MODE ###
Mode to run dedupe command in. One of `interactive`, `skip`, `first`, `newest`, `oldest`, `rename`. The default is `interactive`. See the dedupe command for more information as to what these options mean.
### -n, --dry-run ###
Do a trial run with no permanent changes. Use this in combination
with the `-v` flag to see what rclone would do without actually doing
it. Useful when setting up the `sync` command.
Do a trial run with no permanent changes. Use this to see what rclone
would do without actually doing it. Useful when setting up the `sync`
command which deletes files in the destination.
### --ignore-existing ###
Using this option will make rclone unconditionally skip all files
that exist on the destination, no matter the content of these files.
While this isn't a generally recommended option, it can be useful
in cases where your files change due to encryption. However, it cannot
correct partial transfers in case a transfer was interrupted.
### -I, --ignore-times ###
Using this option will cause rclone to unconditionally upload all
files regardless of the state of files on the destination.
Normally rclone would skip any files that have the same
modification time and are the same size (or have the same checksum if
using `--checksum`).
### --log-file=FILE ###
@@ -217,6 +416,22 @@ Log all of rclone's output to FILE. This is not active by default.
This can be useful for tracking down problems with syncs in
combination with the `-v` flag.
### --low-level-retries NUMBER ###
This controls the number of low level retries rclone does.
A low level retry is used to retry a failing operation - typically one
HTTP request. This might be uploading a chunk of a big file for
example. You will see low level retries in the log with the `-v`
flag.
This shouldn't need to be changed from the default in normal
operations; however, if you get a lot of low level retries you may wish
to reduce the value so rclone moves on to a high level retry (see the
`--retries` flag) quicker.
Disable low level retries with `--low-level-retries 1`.
### --modify-window=TIME ###
When checking whether a file has been modified, this is the maximum
@@ -230,11 +445,30 @@ if you are reading and writing to an OS X filing system this will be
This command line flag allows you to override that computed default.
### --no-gzip-encoding ###
Don't set `Accept-Encoding: gzip`. This means that rclone won't ask
the server for compressed files automatically. Useful if you've set
the server to return files with `Content-Encoding: gzip` but you
uploaded compressed files.
There is no need to set this in normal operation, and doing so will
decrease the network transfer efficiency of rclone.
### -q, --quiet ###
Normally rclone outputs stats and a completion message. If you set
this flag it will make as little output as possible.
### --retries int ###
Retry the entire sync if it fails this many times (default 3).
Some remotes can be unreliable and a few retries helps pick up the
files which didn't get transferred because of errors.
Disable retries with `--retries 1`.
### --size-only ###
Normally rclone will look at modification time and size of files to
@@ -256,6 +490,21 @@ This sets the interval.
The default is `1m`. Use 0 to disable.
### --delete-(before,during,after) ###
This option allows you to specify when files on your destination are
deleted when you sync folders.
Specifying the value `--delete-before` will delete all files present on the
destination, but not on the source *before* starting the transfer
of any new or updated files.
Specifying `--delete-during` (default value) will delete files while checking
and uploading files. This is usually the fastest option.
Specifying `--delete-after` will delay deletion of files until all new/updated
files have been successfully transferred.
### --timeout=TIME ###
This sets the IO idle timeout. If a transfer has started but then
@@ -271,6 +520,25 @@ of timeouts or bigger if you have lots of bandwidth and a fast remote.
The default is to run 4 file transfers in parallel.
### -u, --update ###
This forces rclone to skip any files which exist on the destination
and have a modified time that is newer than the source file.
If an existing destination file has a modification time equal (within
the computed modify window precision) to the source file's, it will be
updated if the sizes are different.
On remotes which don't support mod time directly the time checked will
be the uploaded time. This means that if uploading to one of these
remotes, rclone will skip any files which exist on the destination and
have an uploaded time that is newer than the modification time of the
source file.
This can be useful when transferring to a remote which doesn't support
mod times directly as it is more accurate than a `--size-only` check
and faster than using `--checksum`.
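In pseudo-Go, the decision described above looks something like this
(a sketch with hypothetical names, not rclone's actual code):
```go
// skipFile reports whether --update should skip transferring a file.
func skipFile(srcModTime, dstModTime time.Time, srcSize, dstSize int64, window time.Duration) bool {
	dt := dstModTime.Sub(srcModTime)
	if dt >= window {
		return true // destination is newer - skip
	}
	if dt > -window {
		// times are equal within the modify window - only
		// transfer if the sizes differ
		return srcSize == dstSize
	}
	return false // source is newer - transfer it
}
```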
### -v, --verbose ###
If you set this flag, rclone will become very verbose telling you
@@ -282,17 +550,108 @@ Very useful for debugging.
Prints the version number
Configuration Encryption
------------------------
Your configuration file contains information for logging in to
your cloud services. This means that you should keep your
`.rclone.conf` file in a secure location.
If you are in an environment where that isn't possible, you can
add a password to your configuration. This means that you will
have to enter the password every time you start rclone.
To add a password to your rclone configuration, execute `rclone config`.
```
>rclone config
Current remotes:
e) Edit existing remote
n) New remote
d) Delete remote
s) Set configuration password
q) Quit config
e/n/d/s/q>
```
Go into `s`, Set configuration password:
```
e/n/d/s/q> s
Your configuration is not encrypted.
If you add a password, you will protect your login information to cloud services.
a) Add Password
q) Quit to main menu
a/q> a
Enter NEW configuration password:
password>
Confirm NEW password:
password>
Password set
Your configuration is encrypted.
c) Change Password
u) Unencrypt configuration
q) Quit to main menu
c/u/q>
```
Your configuration is now encrypted, and every time you start rclone
you will now be asked for the password. In the same menu you can
change the password or completely remove encryption from your
configuration.
There is no way to recover the configuration if you lose your password.
rclone uses [nacl secretbox](https://godoc.org/golang.org/x/crypto/nacl/secretbox)
which in turn uses XSalsa20 and Poly1305 to encrypt and authenticate
your configuration with secret-key cryptography.
The password is SHA-256 hashed, which produces the key for secretbox.
The hashed password is not stored.
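A minimal sketch of that scheme (an illustration, not rclone's actual
code; it uses the secretbox package linked above):
```go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

// encrypt seals data with a key derived by SHA-256 hashing the
// password, prepending a random 24 byte nonce to the ciphertext.
func encrypt(password, data []byte) ([]byte, error) {
	key := sha256.Sum256(password) // 32 byte secretbox key
	var nonce [24]byte
	if _, err := rand.Read(nonce[:]); err != nil {
		return nil, err
	}
	// Seal appends the encrypted, authenticated message to nonce[:]
	return secretbox.Seal(nonce[:], data, &nonce, &key), nil
}

func main() {
	box, err := encrypt([]byte("my password"), []byte("[remote]\ntype = b2\n"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("encrypted config is %d bytes\n", len(box))
}
```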
While this provides very good security, we do not recommend storing
your encrypted rclone configuration in public if it contains sensitive
information, unless you use a very strong password.
If it is safe in your environment, you can set the `RCLONE_CONFIG_PASS`
environment variable to contain your password, in which case it will be
used for decrypting the configuration.
If you are running rclone inside a script, you might want to disable
password prompts. To do that, pass the parameter
`--ask-password=false` to rclone. This will make rclone fail instead
of asking for a password, if `RCLONE_CONFIG_PASS` doesn't contain
a valid password.
Developer options
-----------------
These options are useful when developing or debugging rclone. There
are also some more remote specific options which aren't documented
here which are used for testing. These start with remote name eg
`--drive-test-option`.
`--drive-test-option` - see the docs for the remote in question.
### --cpuprofile=FILE ###
Write cpu profile to file. This can be analysed with `go tool pprof`.
Write CPU profile to file. This can be analysed with `go tool pprof`.
### --dump-bodies ###
Dump HTTP headers and bodies - may contain sensitive info. Can be
very verbose. Useful for debugging only.
### --dump-filters ###
Dump the filters to the output. Useful to see exactly what include
and exclude options are filtering on.
### --dump-headers ###
Dump HTTP headers - may contain sensitive info. Can be very verbose.
Useful for debugging only.
### --memprofile=FILE ###
Write memory profile to file. This can be analysed with `go tool pprof`.
### --no-check-certificate=true/false ###
@@ -321,6 +680,16 @@ For the filtering options
* `--files-from`
* `--min-size`
* `--max-size`
* `--min-age`
* `--max-age`
* `--dump-filters`
See the [filtering section](/filtering/).
Exit Code
---------
If any errors occurred during the command, rclone will set a non zero
exit code. This allows scripts to detect when rclone operations have
failed.
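For example, a wrapper program can detect a failed transfer from the
exit status (a sketch using the standard library `os/exec` and `log`
packages):
```go
cmd := exec.Command("rclone", "sync", "/home/local/directory", "remote:bucket")
if err := cmd.Run(); err != nil {
	// a non zero exit code surfaces here as an *exec.ExitError
	log.Fatalf("rclone failed: %v", err)
}
```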


@@ -2,38 +2,40 @@
title: "Rclone downloads"
description: "Download rclone binaries for your OS."
type: page
date: "2015-11-14"
date: "2016-04-18"
---
Rclone Download v1.25
Rclone Download v1.29
=====================
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.25-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.25-windows-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.29-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.29-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.25-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.25-osx-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.29-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.29-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.25-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.25-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.25-linux-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.29-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.29-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.29-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.25-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.25-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.25-freebsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.29-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.29-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.29-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.25-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.25-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.25-netbsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.29-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.29-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.29-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.25-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.25-openbsd-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.29-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.29-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.25-plan9-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.25-plan9-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.29-plan9-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.29-plan9-amd64.zip)
* Solaris
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.25-solaris-amd64.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.29-solaris-amd64.zip)
You can also find a [mirror of the downloads on github](https://github.com/ncw/rclone/releases/tag/v1.29).
Downloads for scripting
=======================


@@ -35,6 +35,8 @@ Rclone Download VERSION
* Solaris
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-solaris-amd64.zip)
You can also find a [mirror of the downloads on github](https://github.com/ncw/rclone/releases/tag/VERSION).
Downloads for scripting
=======================


@@ -1,7 +1,7 @@
---
title: "Google drive"
description: "Rclone docs for Google drive"
date: "2015-09-12"
date: "2016-04-12"
---
<i class="fa fa-google"></i> Google Drive
@@ -27,13 +27,31 @@ d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) drive
type> 4
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Amazon Cloud Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Dropbox
\ "dropbox"
5 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
6 / Google Drive
\ "drive"
7 / Hubic
\ "hubic"
8 / Local Disk
\ "local"
9 / Microsoft OneDrive
\ "onedrive"
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
11 / Yandex Disk
\ "yandex"
Storage> 6
Google Application Client Id - leave blank normally.
client_id>
Google Application Client Secret - leave blank normally.
@@ -111,22 +129,75 @@ system.
#### --drive-chunk-size=SIZE ####
Upload chunk size. Must be a power of 2 >= 256k. Default value is 256kB.
Upload chunk size. Must be a power of 2 >= 256k. Default value is 8 MB.
Making this larger will improve performance, but note that each chunk
is buffered in memory, one per transfer.
Reducing this will reduce memory usage but decrease performance.
#### --drive-full-list ####
Use a full listing for directory list. More data but usually
quicker. On by default, disable with `--full-drive-list=false`.
No longer does anything - kept for backwards compatibility.
#### --drive-upload-cutoff=SIZE ####
File size cutoff for switching to chunked upload. Default is 256kB.
File size cutoff for switching to chunked upload. Default is 8 MB.
#### --drive-use-trash ####
Send files to the trash instead of deleting permanently. Defaults to
off, namely deleting files permanently.
#### --drive-auth-owner-only ####
Only consider files owned by the authenticated user. Requires
that --drive-full-list=true (default).
#### --drive-formats ####
Google documents can only be exported from Google drive. When rclone
downloads a Google doc it chooses a format to download depending upon
this setting.
By default the formats are `docx,xlsx,pptx,svg` which are a sensible
default for an editable document.
When choosing a format, rclone runs down the list provided in order
and chooses the first file format the doc can be exported as from the
list. If the file can't be exported to a format on the formats list,
then rclone will choose a format from the default list.
If you prefer an archive copy then you might use `--drive-formats
pdf`, or if you prefer openoffice/libreoffice formats you might use
`--drive-formats ods,odt`.
Note that rclone adds the extension to the google doc, so if it is
called `My Spreadsheet` on google docs, it will be exported as `My
Spreadsheet.xlsx` or `My Spreadsheet.pdf` etc.
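The selection described above amounts to something like this (a
sketch with hypothetical names, not rclone's actual code):
```go
// chooseFormat returns the extension of the first format in the
// user's preference list that the document can be exported as,
// falling back to the default list.
func chooseFormat(exportable map[string]bool, preferred, defaults []string) string {
	for _, list := range [][]string{preferred, defaults} {
		for _, ext := range list {
			if exportable[ext] {
				return ext
			}
		}
	}
	return "" // can't be exported to any known format
}
```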
Here are the possible extensions with their corresponding mime types.
| Extension | Mime Type | Description |
| --------- |-----------| ------------|
| csv | text/csv | Standard CSV format for Spreadsheets |
| doc | application/msword | Microsoft Office Document |
| docx | application/vnd.openxmlformats-officedocument.wordprocessingml.document | Microsoft Office Document |
| html | text/html | An HTML Document |
| jpg | image/jpeg | A JPEG Image File |
| ods | application/vnd.oasis.opendocument.spreadsheet | Openoffice Spreadsheet |
| ods | application/x-vnd.oasis.opendocument.spreadsheet | Openoffice Spreadsheet |
| odt | application/vnd.oasis.opendocument.text | Openoffice Document |
| pdf | application/pdf | Adobe PDF Format |
| png | image/png | PNG Image Format|
| pptx | application/vnd.openxmlformats-officedocument.presentationml.presentation | Microsoft Office Powerpoint |
| rtf | application/rtf | Rich Text Format |
| svg | image/svg+xml | Scalable Vector Graphics Format |
| txt | text/plain | Plain Text |
| xls | application/vnd.ms-excel | Microsoft Office Spreadsheet |
| xlsx | application/vnd.openxmlformats-officedocument.spreadsheetml.sheet | Microsoft Office Spreadsheet |
| zip | application/zip | A ZIP file of HTML, Images and CSS |
### Limitations ###
Drive has quite a lot of rate limiting. This causes rclone to be


@@ -1,7 +1,7 @@
---
title: "Dropbox"
description: "Rclone docs for Dropbox"
date: "2014-07-17"
date: "2016-02-21"
---
<i class="fa fa-dropbox"></i> Dropbox
@@ -28,15 +28,31 @@ d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) google cloud storage
5) dropbox
6) drive
type> 5
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Amazon Cloud Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Dropbox
\ "dropbox"
5 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
6 / Google Drive
\ "drive"
7 / Hubic
\ "hubic"
8 / Local Disk
\ "local"
9 / Microsoft OneDrive
\ "onedrive"
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
11 / Yandex Disk
\ "yandex"
Storage> 4
Dropbox App Key - leave blank normally.
app_key>
Dropbox App Secret - leave blank normally.
@@ -73,8 +89,18 @@ To copy a local directory to a dropbox directory called backup
### Modified time and MD5SUMs ###
Dropbox doesn't have the capability of storing modification times or
MD5SUMs so syncs will effectively have the `--size-only` flag set.
Dropbox doesn't provide the ability to set modification times in the
V1 public API, so rclone can't support modified time with Dropbox.
This may change in the future - see these issues for details:
* [Dropbox V2 API](https://github.com/ncw/rclone/issues/349)
* [Allow syncs for remotes that can't set modtime on existing objects](https://github.com/ncw/rclone/issues/348)
Dropbox doesn't return any sort of checksum (MD5 or SHA1).
Together that means that syncs to dropbox will effectively have the
`--size-only` flag set.
### Specific options ###
@@ -96,3 +122,8 @@ store. There is a full list of them in the ["Ignored Files" section
of this document](https://www.dropbox.com/en/help/145). Rclone will
issue an error message `File name disallowed - not uploading` if it
attempts to upload one of those file names, but the sync won't fail.
If you have more than 10,000 files in a directory then `rclone purge
dropbox:dir` will return the error `Failed to purge: There are too
many files involved in this operation`. As a work-around do an
`rclone delete dropbox:dir` followed by an `rclone rmdir dropbox:dir`.


@@ -12,31 +12,17 @@ Frequently Asked Questions
Yes they do. All the rclone commands (eg `sync`, `copy` etc) will
work on all the remote storage systems.
### Can I copy the config from one machine to another ###
Sure! Rclone stores all of its config in a single file. If you want
to find this file, the simplest way is to run `rclone -h` and look at
the help for the `--config` flag which will tell you where it is. Eg,
the help for the `--config` flag which will tell you where it is.
```
$ rclone -h
Sync files and directories to and from local and remote object stores - v1.18.
[snip]
Options:
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
--checkers=8: Number of checkers to run in parallel.
-c, --checksum=false: Skip based on checksum & size, not mod-time & size
--config="/home/user/.rclone.conf": Config file.
[snip]
```
See the [remote setup docs](/remote_setup/) for more info.
So in this config the config file can be found in
`/home/user/.rclone.conf`.
Just copy that to the equivalent place in the destination (run `rclone
-h` above again on the destination machine if not sure).
### How do I configure rclone on a remote / headless box with no browser? ###
This has now been documented in its own [remote setup page](/remote_setup/).
### Can rclone sync directly from drive to s3 ###
@@ -137,3 +123,25 @@ mkdir -p /etc/ssl/certs/
curl -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
ntpclient -s -h pool.ntp.org
```
Note that you may need to add the `--insecure` option to the `curl` command line if it doesn't work without.
```
curl --insecure -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
```
### Rclone gives Failed to load config file: function not implemented error ###
Likely this means that you are running rclone on a Linux kernel
version not supported by the Go runtime, ie earlier than version 2.6.23.
See the [system requirements section in the go install
docs](https://golang.org/doc/install) for full details.
### All my uploaded docx/xlsx/pptx files appear as archive/zip ###
This is caused by uploading these files from a Windows computer which
hasn't got the Microsoft Office suite installed. The easiest way to
fix this is to install the Word viewer and the Microsoft Office
Compatibility Pack for Word, Excel, and PowerPoint 2007 and later
versions' file formats.


@@ -1,7 +1,7 @@
---
title: "Filtering"
description: "Filtering, includes and excludes"
date: "2015-09-27"
date: "2016-02-09"
---
# Filtering, includes and excludes #
@@ -9,14 +9,15 @@ date: "2015-09-27"
Rclone has a sophisticated set of include and exclude rules. Some of
these are based on patterns and some on other things like file size.
The filters are applied for the `copy`, `sync`, `move`, `ls`, `lsl`,
`md5sum`, `sha1sum`, `size`, `delete` and `check` operations.
Note that `purge` does not obey the filters.
Each path as it passes through rclone is matched against the include
and exclude rules. The paths are matched without a leading `/`.
For example the files might be passed to the matching engine like this
* `file1.jpg`
* `file2.jpg`
* `directory/file3.jpg`
and exclude rules like `--include`, `--exclude`, `--include-from`,
`--exclude-from`, `--filter`, or `--filter-from`. The simplest way to
try them out is using the `ls` command, or `--dry-run` together with
`-v`.
## Patterns ##
@@ -24,25 +25,28 @@ The patterns used to match files for inclusion or exclusion are based
on "file globs" as used by the unix shell.
If the pattern starts with a `/` then it only matches at the top level
of the directory tree. If it doesn't start with `/` then it is
matched starting at the end of the path, but it will only match a
complete path element.
of the directory tree, relative to the root of the remote.
If it doesn't start with `/` then it is matched starting at the
**end of the path**, but it will only match a complete path element:
file.jpg - matches "file.jpg"
- matches "directory/file.jpg"
- doesn't match "afile.jpg"
- doesn't match "directory/afile.jpg"
/file.jpg - matches "file.jpg"
/file.jpg - matches "file.jpg" in the root directory of the remote
- doesn't match "afile.jpg"
- doesn't match "directory/file.jpg"
**Important** Note that you must use `/` in patterns and not `\` even
if running on Windows.
A `*` matches anything but not a `/`.
*.jpg - matches "file.jpg"
- matches "directory/file.jpg"
- doesn't match "file.jpg/anotherfile.png"
- doesn't match "file.jpg/something"
Use `**` to match anything, including slashes.
Use `**` to match anything, including slashes (`/`).
dir/** - matches "dir/file.jpg"
- matches "dir/dir1/dir2/file.jpg"
@@ -148,7 +152,11 @@ Add a single include rule with `--include`.
Eg `--include *.{png,jpg}` to include all `png` and `jpg` files in the
backup and no others.
This adds an implicit `--exclude *` at the end of the filter list.
This adds an implicit `--exclude *` at the very end of the filter
list. This means you can mix `--include` and `--include-from` with the
other filters (eg `--exclude`) but you must include all the files you
want in the include statement. If this doesn't provide enough
flexibility then you must use `--filter-from`.
### `--include-from` - Read include patterns from file ###
@@ -166,7 +174,11 @@ Then use as `--include-from include-file.txt`. This will sync all
This is useful if you have a lot of rules.
This adds an implicit `--exclude *` at the end of the filter list.
This adds an implicit `--exclude *` at the very end of the filter
list. This means you can mix `--include` and `--include-from` with the
other filters (eg `--exclude`) but you must include all the files you
want in the include statement. If this doesn't provide enough
flexibility then you must use `--filter-from`.
### `--filter` - Add a file-filtering rule ###
@@ -230,6 +242,31 @@ used.
For example `--max-size 1G` means no files larger than 1GByte will be
transferred.
### `--max-age` - Don't transfer any file older than this ###
This option controls the maximum age of files to transfer. Give in
seconds or with a suffix of:
* `ms` - Milliseconds
* `s` - Seconds
* `m` - Minutes
* `h` - Hours
* `d` - Days
* `w` - Weeks
* `M` - Months
* `y` - Years
For example `--max-age 2d` means no files older than 2 days will be
transferred.
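A sketch of how such a suffix might be parsed (hypothetical helper;
the month and year lengths here are assumptions, not rclone's
documented values):
```go
// parseAge turns strings like "2d" into a time.Duration; a bare
// number is taken as seconds.
func parseAge(s string) (time.Duration, error) {
	unit := time.Second
	for _, su := range []struct {
		suffix string
		unit   time.Duration
	}{
		{"ms", time.Millisecond}, // must be checked before "s"
		{"s", time.Second},
		{"m", time.Minute},
		{"h", time.Hour},
		{"d", 24 * time.Hour},
		{"w", 7 * 24 * time.Hour},
		{"M", 30 * 24 * time.Hour},  // assumed month length
		{"y", 365 * 24 * time.Hour}, // assumed year length
	} {
		if strings.HasSuffix(s, su.suffix) {
			unit = su.unit
			s = strings.TrimSuffix(s, su.suffix)
			break
		}
	}
	n, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, err
	}
	return time.Duration(n * float64(unit)), nil
}
```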
### `--min-age` - Don't transfer any file younger than this ###
This option controls the minimum age of files to transfer. Give in
seconds or with a suffix (see `--max-age` for list of suffixes)
For example `--min-age 2d` means no files younger than 2 days will be
transferred.
### `--delete-excluded` - Delete files on dest excluded from sync ###
**Important** this flag is dangerous - use with `--dry-run` and `-v` first.


@@ -26,15 +26,31 @@ d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) google cloud storage
5) dropbox
6) drive
type> 4
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Amazon Cloud Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Dropbox
\ "dropbox"
5 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
6 / Google Drive
\ "drive"
7 / Hubic
\ "hubic"
8 / Local Disk
\ "local"
9 / Microsoft OneDrive
\ "onedrive"
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
11 / Yandex Disk
\ "yandex"
Storage> 5
Google Application Client Id - leave blank normally.
client_id>
Google Application Client Secret - leave blank normally.


@@ -1,7 +1,7 @@
---
title: "Hubic"
description: "Rclone docs for Hubic"
date: "2015-11-08"
date: "2016-03-16"
---
<i class="fa fa-space-shuttle"></i> Hubic
@@ -23,28 +23,46 @@ This will guide you through an interactive setup process:
```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
s) Set configuration password
n/s> n
name> remote
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) drive
3) dropbox
4) google cloud storage
5) local
6) onedrive
7) hubic
8) s3
9) swift
type> 7
Hubic App Client Id - leave blank normally.
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Amazon Cloud Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Dropbox
\ "dropbox"
5 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
6 / Google Drive
\ "drive"
7 / Hubic
\ "hubic"
8 / Local Disk
\ "local"
9 / Microsoft OneDrive
\ "onedrive"
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
11 / Yandex Disk
\ "yandex"
Storage> 7
Hubic Client Id - leave blank normally.
client_id>
Hubic App Client Secret - leave blank normally.
Hubic Client Secret - leave blank normally.
client_secret>
Remote config
If your browser doesn't open automatically go to the following link: http://localhost:53682/auth
Use auto config?
* Say Y if not sure
* Say N if you are working on a remote or headless machine
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
@@ -60,6 +78,9 @@ d) Delete this remote
y/e/d> y
```
See the [remote setup docs](/remote_setup/) for how to set it up on a
machine with no Internet browser available.
Note that rclone runs a webserver on your local machine to collect the
token as returned from Hubic. This only runs from the moment it opens
your browser to the moment you get back the verification code. This
@@ -94,5 +115,10 @@ are the same.
### Limitations ###
Code to refresh the OpenStack token isn't done yet which may cause
problems with very long transfers.
This uses the normal OpenStack Swift mechanism to refresh the Swift
API credentials and ignores the expires field returned by the Hubic
API.
The Swift API doesn't return a correct MD5SUM for segmented files
(Dynamic or Static Large Objects) so rclone won't check or use the
MD5SUM for these.

View File

@@ -1,7 +1,7 @@
---
title: "Install"
description: "Rclone Installation"
date: "2015-06-12"
date: "2016-03-28"
---
Install
@@ -11,15 +11,15 @@ Rclone is a Go program and comes as a single binary file.
[Download](/downloads/) the relevant binary.
Or alternatively if you have Go installed use
Or alternatively if you have Go 1.5+ installed use
go get github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`. If you have built
rclone before then you will want to update its dependencies first with
this (remove `-f` if using go < 1.4)
this
go get -u -v -f github.com/ncw/rclone/...
go get -u -v github.com/ncw/rclone/...
See the [Usage section](/docs/) of the docs for how to use rclone, or
run `rclone -h`.

View File

@@ -25,9 +25,13 @@ on OS X.
### Filenames ###
Filenames are expected to be encoded in UTF-8 on disk. This is the
normal case for Windows and OS X. There is a bit more uncertainty in
the Linux world, but new distributions will have UTF-8 encoded files
names.
normal case for Windows and OS X.
There is a bit more uncertainty in the Linux world, but new
distributions will have UTF-8 encoded file names. If you are using an
old Linux filesystem with non-UTF-8 file names (eg latin1) then you
can use the `convmv` tool to convert the filesystem to UTF-8. This
tool is available in most distributions' package managers.
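For example, a one-shot conversion of a latin1 tree might look like this (a sketch only; `convmv` defaults to a dry run, and `--notest` makes it apply the changes):

```
convmv -f latin1 -t utf8 -r --notest /path/to/files
```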
If an invalid (non-UTF-8) filename is read, the invalid characters will
be replaced with the Unicode replacement character, '�'. `rclone`
@@ -36,3 +40,37 @@ will emit a debug message in this case (use `-v` to see), eg
```
Local file system at .: Replacing invalid UTF-8 characters in "gro\xdf"
```
### Long paths on Windows ###
Rclone handles long paths automatically, by converting all paths to long
[UNC paths](https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath)
which allows paths up to 32,767 characters.
This is why you will see that a path such as `c:\files`
is converted to the UNC path `\\?\c:\files` in the output,
and `\\server\share` is converted to `\\?\UNC\server\share`.
However, in rare cases this may cause problems with buggy file
system drivers like [EncFS](https://github.com/ncw/rclone/issues/261).
To disable UNC conversion globally, add this to your `.rclone.conf` file:
```
[local]
nounc = true
```
If you want to selectively disable UNC, you can add it to a separate entry like this:
```
[nounc]
type = local
nounc = true
```
And use rclone like this:
`rclone copy c:\src nounc:z:\dst`
This will use UNC paths on `c:\src` but not on `z:\dst`.
Of course this will cause problems if the absolute path length of a
file exceeds 258 characters on `z:`, so only use this option if you have to.

View File

@@ -22,27 +22,47 @@ Here is an example of how to make a remote called `remote`. First run:
This will guide you through an interactive setup process:
```
No remotes found - make a new one
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
s) Set configuration password
n/s> n
name> remote
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) drive
3) dropbox
4) google cloud storage
5) local
6) onedrive
7) s3
8) swift
type> 6
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Amazon Cloud Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Dropbox
\ "dropbox"
5 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
6 / Google Drive
\ "drive"
7 / Hubic
\ "hubic"
8 / Local Disk
\ "local"
9 / Microsoft OneDrive
\ "onedrive"
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
11 / Yandex Disk
\ "yandex"
Storage> 9
Microsoft App Client Id - leave blank normally.
client_id>
Microsoft App Client Secret - leave blank normally.
client_secret>
Remote config
Use auto config?
* Say Y if not sure
* Say N if you are working on a remote or headless machine
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
@@ -59,6 +79,9 @@ d) Delete this remote
y/e/d> y
```
See the [remote setup docs](/remote_setup/) for how to set it up on a
machine with no Internet browser available.
Note that rclone runs a webserver on your local machine to collect the
token as returned from Microsoft. This only runs from the moment it
opens your browser to the moment you get back the verification
@@ -79,14 +102,14 @@ To copy a local directory to an One Drive directory called backup
rclone copy /home/source remote:backup
### Modified time and MD5SUMs ###
### Modified time and hashes ###
One Drive allows modification times to be set on objects accurate to 1
second. These will be used to detect whether objects need syncing or
not.
One drive does not support MD5SUMs. This means the `--checksum` flag
will be equivalent to the `--size-only` flag.
One drive supports SHA1 type hashes, so you can use the `--checksum` flag.
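For example, to print the SHA1 hashes of everything in a directory (assuming the `sha1sum` subcommand available in this release):

    rclone sha1sum remote:backup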
### Deleting files ###

View File

@@ -15,25 +15,30 @@ show through.
Here is an overview of the major features of each cloud storage system.
| Name | MD5SUM | ModTime | Case Insensitive | Duplicate Files |
| Name | Hash | ModTime | Case Insensitive | Duplicate Files |
| ---------------------- |:-------:|:-------:|:----------------:|:---------------:|
| Google Drive | Yes | Yes | No | Yes |
| Amazon S3 | Yes | Yes | No | No |
| Openstack Swift | Yes | Yes | No | No |
| Dropbox | No | No | Yes | No |
| Google Cloud Storage | Yes | Yes | No | No |
| Amazon Cloud Drive | Yes | No | Yes | No |
| Microsoft One Drive | No | Yes | Yes | No |
| Hubic | Yes | Yes | No | No |
| The local filesystem | Yes | Yes | Depends | No |
| Google Drive | MD5 | Yes | No | Yes |
| Amazon S3 | MD5 | Yes | No | No |
| Openstack Swift | MD5 | Yes | No | No |
| Dropbox | - | No | Yes | No |
| Google Cloud Storage | MD5 | Yes | No | No |
| Amazon Cloud Drive | MD5 | No | Yes | No |
| Microsoft One Drive | SHA1 | Yes | Yes | No |
| Hubic | MD5 | Yes | No | No |
| Backblaze B2 | SHA1 | Yes | No | No |
| Yandex Disk | MD5 | Yes | No | No |
| The local filesystem | All | Yes | Depends | No |
### MD5SUM ###
### Hash ###
The cloud storage system supports MD5SUMs of the objects. This
is used if available when transferring data as an integrity check and
The cloud storage system supports various hash types of the objects.
The hashes are used when transferring data as an integrity check and
can be specifically used with the `--checksum` flag in syncs and in
the `check` command.
To use the checksum checks between filesystems they must support a
common hash type.
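For example, a sync between two MD5-capable systems can compare by checksum instead of modification time (placeholder paths):

    rclone sync --checksum /home/source remote:backup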
### ModTime ###
The cloud storage system supports setting modification times on
@@ -57,7 +62,7 @@ matter how many times you run the sync it never completes fully.
The local filesystem may or may not be case sensitive depending on OS.
* Windows - usually case insensitive
* Windows - usually case insensitive, though case is preserved
* OSX - usually case insensitive, though it is possible to format case sensitive
* Linux - usually case sensitive, but there are case insensitive file systems (eg FAT formatted USB keys)
@@ -70,4 +75,5 @@ systems.
If a cloud storage system allows duplicate files then it can have two
objects with the same name.
This confuses rclone greatly when syncing.
This confuses rclone greatly when syncing - use the `rclone dedupe`
command to rename or remove duplicates.
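For example, to keep only the newest copy of each duplicate non-interactively (using the `--dedupe-mode` flag added in this release):

    rclone dedupe --dedupe-mode newest remote: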

View File

@@ -0,0 +1,88 @@
---
title: "Remote Setup"
description: "Configuring rclone on a remote / headless machine"
date: "2016-01-07"
---
# Configuring rclone on a remote / headless machine #
Some of the configurations (those involving oauth2) require an
Internet connected web browser.
If you are trying to set rclone up on a remote or headless box with no
browser available on it (eg a NAS or a server in a datacenter) then
you will need to use an alternative means of configuration. There are
two ways of doing it, described below.
## Configuring using rclone authorize ##
On the headless box
```
...
Remote config
Use auto config?
* Say Y if not sure
* Say N if you are working on a remote or headless machine
y) Yes
n) No
y/n> n
For this to work, you will need rclone available on a machine that has a web browser available.
Execute the following on your machine:
rclone authorize "amazon cloud drive"
Then paste the result below:
result>
```
Then on your main desktop machine
```
rclone authorize "amazon cloud drive"
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
Paste the following into your remote machine --->
SECRET_TOKEN
<---End paste
```
Then back to the headless box, paste in the code
```
result> SECRET_TOKEN
--------------------
[acd12]
client_id =
client_secret =
token = SECRET_TOKEN
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d>
```
## Configuring by copying the config file ##
Rclone stores all of its config in a single configuration file. This
can easily be copied to configure a remote rclone.
So first configure rclone on your desktop machine
rclone config
to set up the config file.
Find the config file by running `rclone -h` and looking for the help for the `--config` option.
```
$ rclone -h
[snip]
--config="/home/user/.rclone.conf": Config file.
[snip]
```
Now transfer it to the remote box (scp, cut and paste, ftp, sftp, etc.)
and put it in the correct location (use `rclone -h` on the remote box to
find out where).
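For example, using scp with a hypothetical host name:

    scp ~/.rclone.conf user@remotebox:~/.rclone.conf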

View File

@@ -19,39 +19,82 @@ This will guide you through an interactive setup process.
```
No remotes found - make a new one
n) New remote
q) Quit config
n/q> n
s) Set configuration password
n/s> n
name> remote
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) google cloud storage
5) dropbox
6) drive
type> 2
AWS Access Key ID.
access_key_id> accesskey
AWS Secret Access Key (password).
secret_access_key> secretaccesskey
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Amazon Cloud Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Dropbox
\ "dropbox"
5 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
6 / Google Drive
\ "drive"
7 / Hubic
\ "hubic"
8 / Local Disk
\ "local"
9 / Microsoft OneDrive
\ "onedrive"
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
11 / Yandex Disk
\ "yandex"
Storage> 2
Get AWS credentials from runtime (environment variables or EC2 meta data if no env vars). Only applies if access_key_id and secret_access_key are blank.
Choose a number from below, or type in your own value
1 / Enter AWS credentials in the next step
\ "false"
2 / Get AWS credentials from the environment (env vars or IAM)
\ "true"
env_auth> 1
AWS Access Key ID - leave blank for anonymous access or runtime credentials.
access_key_id> access_key
AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.
secret_access_key> secret_key
Region to connect to.
Choose a number from below, or type in your own value
* The default endpoint - a good choice if you are unsure.
* US Region, Northern Virginia or Pacific Northwest.
* Leave location constraint empty.
1) us-east-1
* US West (Oregon) Region
* Needs location constraint us-west-2.
2) us-west-2
[snip]
* South America (Sao Paulo) Region
* Needs location constraint sa-east-1.
9) sa-east-1
* If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.
10) other-v2-signature
* If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.
11) other-v4-signature
/ The default endpoint - a good choice if you are unsure.
1 | US Region, Northern Virginia or Pacific Northwest.
| Leave location constraint empty.
\ "us-east-1"
/ US West (Oregon) Region
2 | Needs location constraint us-west-2.
\ "us-west-2"
/ US West (Northern California) Region
3 | Needs location constraint us-west-1.
\ "us-west-1"
/ EU (Ireland) Region
4 | Needs location constraint EU or eu-west-1.
\ "eu-west-1"
/ EU (Frankfurt) Region
5 | Needs location constraint eu-central-1.
\ "eu-central-1"
/ Asia Pacific (Singapore) Region
6 | Needs location constraint ap-southeast-1.
\ "ap-southeast-1"
/ Asia Pacific (Sydney) Region
7 | Needs location constraint ap-southeast-2.
\ "ap-southeast-2"
/ Asia Pacific (Tokyo) Region
8 | Needs location constraint ap-northeast-1.
\ "ap-northeast-1"
/ South America (Sao Paulo) Region
9 | Needs location constraint sa-east-1.
\ "sa-east-1"
/ If using an S3 clone that only understands v2 signatures
10 | eg Ceph/Dreamhost
| set this and make sure you set the endpoint.
\ "other-v2-signature"
/ If using an S3 clone that understands v4 signatures set this
11 | and make sure you set the endpoint.
\ "other-v4-signature"
region> 1
Endpoint for S3 API.
Leave blank if using AWS to use the default endpoint for the region.
@@ -59,21 +102,31 @@ Specify if using an S3 clone such as Ceph.
endpoint>
Location constraint - must be set to match the Region. Used when creating buckets only.
Choose a number from below, or type in your own value
* Empty for US Region, Northern Virginia or Pacific Northwest.
1)
* US West (Oregon) Region.
2) us-west-2
* US West (Northern California) Region.
3) us-west-1
* EU (Ireland) Region.
4) eu-west-1
[snip]
1 / Empty for US Region, Northern Virginia or Pacific Northwest.
\ ""
2 / US West (Oregon) Region.
\ "us-west-2"
3 / US West (Northern California) Region.
\ "us-west-1"
4 / EU (Ireland) Region.
\ "eu-west-1"
5 / EU Region.
\ "EU"
6 / Asia Pacific (Singapore) Region.
\ "ap-southeast-1"
7 / Asia Pacific (Sydney) Region.
\ "ap-southeast-2"
8 / Asia Pacific (Tokyo) Region.
\ "ap-northeast-1"
9 / South America (Sao Paulo) Region.
\ "sa-east-1"
location_constraint> 1
Remote config
--------------------
[remote]
access_key_id = accesskey
secret_access_key = secretaccesskey
env_auth = false
access_key_id = access_key
secret_access_key = secret_key
region = us-east-1
endpoint =
location_constraint =
@@ -82,17 +135,6 @@ y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
Current remotes:
Name Type
==== ====
remote s3
e) Edit existing remote
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> q
```
This remote is called `remote` and can now be used like this
@@ -133,36 +175,59 @@ created in. If you attempt to access a bucket from the wrong region,
you will get an error, `incorrect region, the bucket is not in 'XXX'
region`.
### Authentication ###
There are two ways to supply `rclone` with a set of AWS
credentials. In order of precedence:
- Directly in the rclone configuration file (as configured by `rclone config`)
- set `access_key_id` and `secret_access_key`
- Runtime configuration:
- set `env_auth` to `true` in the config file
- Exporting the following environment variables before running `rclone`
- Access Key ID: `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`
- Secret Access Key: `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY`
- Running `rclone` on an EC2 instance with an IAM role
If none of these options actually ends up providing `rclone` with AWS
credentials then S3 interaction will be unauthenticated (see below).
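As a minimal sketch of the runtime method, with `env_auth = true` set in the config and placeholder credentials:

```
export AWS_ACCESS_KEY_ID=AKIAXXXXXXXXXXXXXXXX
export AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
rclone lsd remote:
```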
### Anonymous access to public buckets ###
If you want to use rclone to access a public bucket, configure with a
blank `access_key_id` and `secret_access_key`. Eg
```
e) Edit existing remote
No remotes found - make a new one
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
n/q> n
name> anons3
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) drive
3) dropbox
4) google cloud storage
5) local
6) s3
7) swift
type> 6
AWS Access Key ID - leave blank for anonymous access.
access_key_id>
AWS Secret Access Key (password) - leave blank for anonymous access.
secret_access_key>
Region to connect to.
region> 1
endpoint>
location_constraint>
2) b2
3) drive
4) dropbox
5) google cloud storage
6) swift
7) hubic
8) local
9) onedrive
10) s3
11) yandex
type> 10
Get AWS credentials from runtime (environment variables or EC2 meta data if no env vars). Only applies if access_key_id and secret_access_key are blank.
Choose a number from below, or type in your own value
* Enter AWS credentials in the next step
1) false
* Get AWS credentials from the environment (env vars or IAM)
2) true
env_auth> 1
AWS Access Key ID - leave blank for anonymous access or runtime credentials.
access_key_id>
AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.
secret_access_key>
...
```
Then use it as normal with the name of the public bucket, eg

View File

@@ -25,42 +25,68 @@ This will guide you through an interactive setup process.
```
No remotes found - make a new one
n) New remote
q) Quit config
n/q> n
s) Set configuration password
n/s> n
name> remote
What type of source is it?
Choose a number from below
1) swift
2) s3
3) local
4) drive
type> 1
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Amazon Cloud Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Dropbox
\ "dropbox"
5 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
6 / Google Drive
\ "drive"
7 / Hubic
\ "hubic"
8 / Local Disk
\ "local"
9 / Microsoft OneDrive
\ "onedrive"
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
11 / Yandex Disk
\ "yandex"
Storage> 10
User name to log in.
user> user_name
API key or password.
key> password_or_api_key
Authentication URL for server.
Choose a number from below, or type in your own value
* Rackspace US
1) https://auth.api.rackspacecloud.com/v1.0
* Rackspace UK
2) https://lon.auth.api.rackspacecloud.com/v1.0
* Rackspace v2
3) https://identity.api.rackspacecloud.com/v2.0
* Memset Memstore UK
4) https://auth.storage.memset.com/v1.0
* Memset Memstore UK v2
5) https://auth.storage.memset.com/v2.0
1 / Rackspace US
\ "https://auth.api.rackspacecloud.com/v1.0"
2 / Rackspace UK
\ "https://lon.auth.api.rackspacecloud.com/v1.0"
3 / Rackspace v2
\ "https://identity.api.rackspacecloud.com/v2.0"
4 / Memset Memstore UK
\ "https://auth.storage.memset.com/v1.0"
5 / Memset Memstore UK v2
\ "https://auth.storage.memset.com/v2.0"
6 / OVH
\ "https://auth.cloud.ovh.net/v2.0"
auth> 1
Tenant name - optional
tenant>
tenant>
Region name - optional
region>
Storage URL - optional
storage_url>
Remote config
--------------------
[remote]
user = user_name
key = password_or_api_key
auth = https://auth.api.rackspacecloud.com/v1.0
tenant =
tenant =
region =
storage_url =
--------------------
y) Yes this is OK
e) Edit this remote
@@ -105,3 +131,9 @@ ns.
This is a de facto standard (used in the official python-swiftclient
amongst others) for storing the modification time for an object.
### Limitations ###
The Swift API doesn't return a correct MD5SUM for segmented files
(Dynamic or Static Large Objects) so rclone won't check or use the
MD5SUM for these.

113
docs/content/yandex.md Normal file
View File

@@ -0,0 +1,113 @@
---
title: "Yandex"
description: "Yandex Disk"
date: "2015-12-30"
---
<i class="fa fa-space-shuttle"></i> Yandex Disk
----------------------------------------
[Yandex Disk](https://disk.yandex.com) is a cloud storage solution created by [Yandex](http://yandex.com).
Yandex paths may be as deep as required, eg `remote:directory/subdirectory`.
Here is an example of making a yandex configuration. First run
rclone config
This will guide you through an interactive setup process:
```
No remotes found - make a new one
n) New remote
s) Set configuration password
n/s> n
name> remote
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Amazon Cloud Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Dropbox
\ "dropbox"
5 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
6 / Google Drive
\ "drive"
7 / Hubic
\ "hubic"
8 / Local Disk
\ "local"
9 / Microsoft OneDrive
\ "onedrive"
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
11 / Yandex Disk
\ "yandex"
Storage> 11
Yandex Client Id - leave blank normally.
client_id>
Yandex Client Secret - leave blank normally.
client_secret>
Remote config
Use auto config?
* Say Y if not sure
* Say N if you are working on a remote or headless machine
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
client_id =
client_secret =
token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"bearer","expiry":"2016-12-29T12:27:11.362788025Z"}
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
See the [remote setup docs](/remote_setup/) for how to set it up on a
machine with no Internet browser available.
Note that rclone runs a webserver on your local machine to collect the
token as returned from Yandex Disk. This only runs from the moment it
opens your browser to the moment you get back the verification code.
This is on `http://127.0.0.1:53682/` and it may require you to
unblock it temporarily if you are running a host firewall.
Once configured you can then use `rclone` like this,
See top level directories
rclone lsd remote:
Make a new directory
rclone mkdir remote:directory
List the contents of a directory
rclone ls remote:directory
Sync `/home/local/directory` to the remote path, deleting any
excess files in the path.
rclone sync /home/local/directory remote:directory
### Modified time ###
Modified times are supported and are stored with 1 ns accuracy in a custom
metadata item called `rclone_modified`, in RFC3339 format with nanoseconds.
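As a sketch of that timestamp layout, Go's `time.RFC3339Nano` produces a compatible RFC3339-with-nanoseconds string (illustrative only, not rclone's actual code; note that Go trims trailing zeros):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// RFC3339 with nanosecond precision, as stored in the
	// rclone_modified metadata item described above.
	fmt.Println(time.Now().UTC().Format(time.RFC3339Nano))
	// e.g. 2016-03-28T12:34:56.789012345Z
}
```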
### MD5 checksums ###
MD5 checksums are natively supported by Yandex Disk.

View File

@@ -2,7 +2,7 @@
<div class="row">
<hr>
<div class="col-sm-12">
<p>&copy; <a href="http://www.craig-wood.com/nick/">Nick Craig-Wood</a> 2014<br>
<p>&copy; <a href="http://www.craig-wood.com/nick/">Nick Craig-Wood</a> 2014-2016<br>
Website hosted on <a href="http://www.memset.com/cloud/storage/">Memset Memstore™</a>,
uploaded with <a href="http://rclone.org">rclone</a>
and built with <a href="https://github.com/spf13/hugo">Hugo</a></p>

View File

@@ -7,7 +7,7 @@
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="{{ .Site.BaseUrl }}"><i class="fa fa-home"></i> {{ .Site.Title }}</a>
<a class="navbar-brand" href="{{ .Site.BaseURL }}"><i class="fa fa-home"></i> {{ .Site.Title }}</a>
</div>
<div class="collapse navbar-collapse navbar-ex1-collapse">
<ul class="nav navbar-nav">
@@ -39,7 +39,9 @@
<li><a href="/amazonclouddrive/"><i class="fa fa-amazon"></i> Amazon Cloud Drive</a></li>
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft One Drive</a></li>
<li><a href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a></li>
<li><a href="/b2/"><i class="fa fa-fire"></i> Backblaze B2</a></li>
<li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
<li><a href="/yandex/"><i class="fa fa-space-shuttle"></i> Yandex Disk</a></li>
</ul>
</li>
<li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>

View File

@@ -4,7 +4,7 @@
<div class="container">
<div class="row">
<div class="col-md-9">
{{ range $key, $value := .Site.Indexes.groups.about.Pages }}
{{ range $key, $value := .Site.Taxonomies.groups.about.Pages }}
{{ $value.Content }}
{{ end }}
</div>

View File

@@ -2,7 +2,7 @@
{{ range .Data.Pages }}
<url>
<loc>{{ .Permalink }}</loc>
<lastmod>{{ safeHtml ( .Date.Format "2006-01-02T15:04:05-07:00" ) }}</lastmod>{{ with .Sitemap.ChangeFreq }}
<lastmod>{{ safeHTML ( .Date.Format "2006-01-02T15:04:05-07:00" ) }}</lastmod>{{ with .Sitemap.ChangeFreq }}
<changefreq>{{ . }}</changefreq>{{ end }}{{ if ge .Sitemap.Priority 0.0 }}
<priority>{{ .Sitemap.Priority }}</priority>{{ end }}
</url>

View File

@@ -30,67 +30,99 @@ import (
// Constants
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "8p/yms3OlNXE9OTDl/HLypf9gdiJ5cT3"
driveFolderType = "application/vnd.google-apps.folder"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "8p/yms3OlNXE9OTDl/HLypf9gdiJ5cT3"
driveFolderType = "application/vnd.google-apps.folder"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
minSleep = 10 * time.Millisecond
maxSleep = 2000 * time.Millisecond
decayConstant = 0 // bigger for slower decay, exponential
attackConstant = 0 // bigger for slower attack, exponential
defaultExtensions = "docx,xlsx,pptx,svg"
)
// Globals
var (
// Flags
driveFullList = pflag.BoolP("drive-full-list", "", true, "Use a full listing for directory list. More data but usually quicker.")
driveUseTrash = pflag.BoolP("drive-use-trash", "", false, "Send files to the trash instead of deleting permanently.")
driveFullList = pflag.BoolP("drive-full-list", "", false, "Use a full listing for directory list. More data but usually quicker. (obsolete)")
driveAuthOwnerOnly = pflag.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user. Requires drive-full-list.")
driveUseTrash = pflag.BoolP("drive-use-trash", "", false, "Send files to the trash instead of deleting permanently.")
driveExtensions = pflag.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
chunkSize = fs.SizeSuffix(256 * 1024)
chunkSize = fs.SizeSuffix(8 * 1024 * 1024)
driveUploadCutoff = chunkSize
// Description of how to auth for this app
driveConfig = &oauth2.Config{
Scopes: []string{"https://www.googleapis.com/auth/drive"},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: fs.Reveal(rcloneClientSecret),
ClientSecret: fs.Reveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
}
mimeTypeToExtension = map[string]string{
"application/msword": "doc",
"application/pdf": "pdf",
"application/rtf": "rtf",
"application/vnd.ms-excel": "xls",
"application/vnd.oasis.opendocument.spreadsheet": "ods",
"application/vnd.oasis.opendocument.text": "odt",
"application/vnd.openxmlformats-officedocument.presentationml.presentation": "pptx",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx",
"application/x-vnd.oasis.opendocument.spreadsheet": "ods",
"application/zip": "zip",
"image/jpeg": "jpg",
"image/png": "png",
"image/svg+xml": "svg",
"text/csv": "csv",
"text/html": "html",
"text/plain": "txt",
}
extensionToMimeType map[string]string
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "drive",
NewFs: NewFs,
fs.Register(&fs.RegInfo{
Name: "drive",
Description: "Google Drive",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config(name, driveConfig)
err := oauthutil.Config("drive", name, driveConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: oauthutil.ConfigClientID,
Name: fs.ConfigClientID,
Help: "Google Application Client Id - leave blank normally.",
}, {
Name: oauthutil.ConfigClientSecret,
Name: fs.ConfigClientSecret,
Help: "Google Application Client Secret - leave blank normally.",
}},
})
pflag.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
pflag.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must be a power of 2 >= 256k.")
// Invert mimeTypeToExtension
extensionToMimeType = make(map[string]string, len(mimeTypeToExtension))
for mimeType, extension := range mimeTypeToExtension {
extensionToMimeType[extension] = mimeType
}
}
// Fs represents a remote drive server
type Fs struct {
name string // name of this remote
svc *drive.Service // the connection to the drive server
root string // the path we are working on
client *http.Client // authorized client
about *drive.About // information about the drive, including the root
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // To pace the API calls
name string // name of this remote
svc *drive.Service // the connection to the drive server
root string // the path we are working on
client *http.Client // authorized client
about *drive.About // information about the drive, including the root
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // To pace the API calls
extensions []string // preferred extensions to download docs
}
// Object describes a drive object
@@ -102,6 +134,7 @@ type Object struct {
md5sum string // md5sum of the object
bytes int64 // size of the object
modifiedDate string // RFC3339 time it was last modified
isDocument bool // if set this is a Google doc
}
// ------------------------------------------------------------
@@ -216,6 +249,27 @@ func isPowerOfTwo(x int64) bool {
}
}
// parseExtensions parses drive export extensions from a string
func (f *Fs) parseExtensions(extensions string) error {
for _, extension := range strings.Split(extensions, ",") {
extension = strings.ToLower(strings.TrimSpace(extension))
if _, found := extensionToMimeType[extension]; !found {
return fmt.Errorf("Couldn't find mime type for extension %q", extension)
}
found := false
for _, existingExtension := range f.extensions {
if extension == existingExtension {
found = true
break
}
}
if !found {
f.extensions = append(f.extensions, extension)
}
}
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string) (fs.Fs, error) {
if !isPowerOfTwo(int64(chunkSize)) {
@@ -238,7 +292,7 @@ func NewFs(name, path string) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetAttackConstant(attackConstant),
}
// Create a new authorized Drive client.
@@ -259,6 +313,16 @@ func NewFs(name, path string) (fs.Fs, error) {
f.dirCache = dircache.New(root, f.about.RootFolderId, f)
// Parse extensions
err = f.parseExtensions(*driveExtensions)
if err != nil {
return nil, err
}
err = f.parseExtensions(defaultExtensions) // make sure there are some sensible ones on there
if err != nil {
return nil, err
}
// Find the current root
err = f.dirCache.FindRoot(false)
if err != nil {
@@ -353,6 +417,41 @@ func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
return info.Id, nil
}
// isAuthOwned checks if any of the item owners is the authenticated owner
func isAuthOwned(item *drive.File) bool {
for _, owner := range item.Owners {
if owner.IsAuthenticatedUser {
return true
}
}
return false
}
// findExportFormat works out the optimum extension and download URL
// for this item.
//
// Look through the extensions and find the first format that can be
// converted. If none found then return "", ""
func (f *Fs) findExportFormat(filepath string, item *drive.File) (extension, link string) {
// Warn about unknown export formats
for mimeType := range item.ExportLinks {
if _, ok := mimeTypeToExtension[mimeType]; !ok {
fs.Debug(filepath, "Unknown export type %q - ignoring", mimeType)
}
}
// Find the first export format we can
for _, extension := range f.extensions {
mimeType := extensionToMimeType[extension]
if link, ok := item.ExportLinks[mimeType]; ok {
return extension, link
}
}
// else return empty
return "", ""
}
// Path should be directory path either "" or "path/"
//
// List the directory using a recursive list from the root
@@ -364,12 +463,15 @@ func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) err
// Make the API request
var wg sync.WaitGroup
_, err := f.listAll(dirID, "", false, false, func(item *drive.File) bool {
// Recurse on directories
if item.MimeType == driveFolderType {
filepath := path + item.Title
switch {
case *driveAuthOwnerOnly && !isAuthOwned(item):
// ignore object or directory
case item.MimeType == driveFolderType:
// Recurse on directories
wg.Add(1)
folder := path + item.Title + "/"
folder := filepath + "/"
fs.Debug(f, "Reading %s", folder)
go func() {
defer wg.Done()
err := f.listDirRecursive(item.Id, folder, out)
@@ -379,13 +481,27 @@ func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) err
}
}()
} else {
// If item has no MD5 sum it isn't stored on drive, so ignore it
if item.Md5Checksum != "" {
if fs := f.newFsObjectWithInfo(path+item.Title, item); fs != nil {
out <- fs
case item.Md5Checksum != "":
// If item has MD5 sum it is a file stored on drive
if o := f.newFsObjectWithInfo(filepath, item); o != nil {
out <- o
}
case len(item.ExportLinks) != 0:
// If item has export links then it is a google doc
extension, link := f.findExportFormat(filepath, item)
if extension == "" {
fs.Debug(filepath, "No export formats found")
} else {
if o := f.newFsObjectWithInfo(filepath+"."+extension, item); o != nil {
obj := o.(*Object)
obj.isDocument = true
obj.url = link
obj.bytes = -1
out <- o
}
}
default:
fs.Debug(filepath, "Ignoring unknown object")
}
return false
})
@@ -400,74 +516,6 @@ func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) err
return nil
}
// Path should be directory path either "" or "path/"
//
// List the directory using a full listing and filtering out unwanted
// items
//
// This is fast in terms of number of API calls, but slow in terms of
// fetching more data than it needs
func (f *Fs) listDirFull(dirID string, path string, out fs.ObjectsChan) error {
// Orphans waiting for their parent
orphans := make(map[string][]*drive.File)
var outputItem func(*drive.File, string) // forward def for recursive fn
// Output an item or directory
outputItem = func(item *drive.File, directory string) {
// fmt.Printf("found %q %q parent %q dir %q ok %s\n", item.Title, item.Id, parentId, directory, ok)
path := item.Title
if directory != "" {
path = directory + "/" + path
}
if item.MimeType == driveFolderType {
// Put the directory into the dircache
f.dirCache.Put(path, item.Id)
// fmt.Printf("directory %s %s %s\n", path, item.Title, item.Id)
// Collect the orphans if any
for _, orphan := range orphans[item.Id] {
// fmt.Printf("rescuing orphan %s %s %s\n", path, orphan.Title, orphan.Id)
outputItem(orphan, path)
}
delete(orphans, item.Id)
} else {
// fmt.Printf("file %s %s %s\n", path, item.Title, item.Id)
// If item has no MD5 sum it isn't stored on drive, so ignore it
if item.Md5Checksum != "" {
if fs := f.newFsObjectWithInfo(path, item); fs != nil {
out <- fs
}
}
}
}
// Make the API request
_, err := f.listAll("", "", false, false, func(item *drive.File) bool {
if len(item.Parents) == 0 {
// fmt.Printf("no parents %s %s: %#v\n", item.Title, item.Id, item)
return false
}
parentID := item.Parents[0].Id
directory, ok := f.dirCache.GetInv(parentID)
if !ok {
// Haven't found the parent yet so add to orphans
// fmt.Printf("orphan[%s] %s %s\n", parentID, item.Title, item.Id)
orphans[parentID] = append(orphans[parentID], item)
} else {
outputItem(item, directory)
}
return false
})
if err != nil {
return err
}
if len(orphans) > 0 {
// fmt.Printf("Orphans!!!! %v", orphans)
}
return nil
}
// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
@@ -478,11 +526,7 @@ func (f *Fs) List() fs.ObjectsChan {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
if f.root == "" && *driveFullList {
err = f.listDirFull(f.dirCache.RootID(), "", out)
} else {
err = f.listDirRecursive(f.dirCache.RootID(), "", out)
}
err = f.listDirRecursive(f.dirCache.RootID(), "", out)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "List failed: %s", err)
@@ -558,7 +602,11 @@ func (f *Fs) createFileInfo(remote string, modTime time.Time, size int64) (*Obje
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
modTime := src.ModTime()
o, createInfo, err := f.createFileInfo(remote, modTime, size)
if err != nil {
return nil, err
@@ -648,6 +696,9 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
fs.Debug(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
if srcObj.isDocument {
return nil, fmt.Errorf("Can't copy a Google document")
}
o, createInfo, err := f.createFileInfo(remote, srcObj.ModTime(), srcObj.bytes)
if err != nil {
@@ -710,6 +761,9 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
fs.Debug(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
if srcObj.isDocument {
return nil, fmt.Errorf("Can't move a Google document")
}
// Temporary FsObject under construction
dstObj, dstInfo, err := f.createFileInfo(remote, srcObj.ModTime(), srcObj.bytes)
@@ -718,7 +772,11 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
}
// Do the move
info, err := f.svc.Files.Patch(srcObj.id, dstInfo).SetModifiedDate(true).Do()
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Patch(srcObj.id, dstInfo).SetModifiedDate(true).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
@@ -760,7 +818,10 @@ func (f *Fs) DirMove(src fs.Fs) error {
Title: leaf,
Parents: []*drive.ParentReference{{Id: directoryID}},
}
_, err = f.svc.Files.Patch(srcFs.dirCache.RootID(), &patch).Do()
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Patch(srcFs.dirCache.RootID(), &patch).Do()
return shouldRetry(err)
})
if err != nil {
return err
}
@@ -768,10 +829,15 @@ func (f *Fs) DirMove(src fs.Fs) error {
return nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
func (o *Object) Fs() fs.Info {
return o.fs
}
@@ -788,13 +854,28 @@ func (o *Object) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
}
return o.md5sum, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
if o.isDocument && o.bytes < 0 {
// If it is a google doc then we must HEAD it to see
// how big it is
res, err := o.httpResponse("HEAD")
if err != nil {
fs.ErrorLog(o, "Error reading size: %v", err)
return 0
}
_ = res.Body.Close()
o.bytes = res.ContentLength
// fs.Debug(o, "Read size of document: %v", o.bytes)
}
return o.bytes
}
@@ -855,12 +936,10 @@ func (o *Object) ModTime() time.Time {
}
// SetModTime sets the modification time of the drive fs object
func (o *Object) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) error {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to read metadata: %s", err)
return
return err
}
// New metadata
updateInfo := &drive.File{
@@ -873,12 +952,11 @@ func (o *Object) SetModTime(modTime time.Time) {
return shouldRetry(err)
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
return
return err
}
// Update info from read data
o.setMetaData(info)
return nil
}
// Storable returns a boolean as to whether this object is storable
@@ -886,17 +964,17 @@ func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open() (in io.ReadCloser, err error) {
// httpResponse gets an http.Response object for the object o.url
// using the method passed in
func (o *Object) httpResponse(method string) (res *http.Response, err error) {
if o.url == "" {
return nil, fmt.Errorf("Forbidden to download - check sharing permission")
}
req, err := http.NewRequest("GET", o.url, nil)
req, err := http.NewRequest(method, o.url, nil)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", fs.UserAgent)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
return shouldRetry(err)
@@ -904,10 +982,57 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
if err != nil {
return nil, err
}
return res, nil
}
// openFile represents an Object open for reading
type openFile struct {
o *Object // Object we are reading for
in io.ReadCloser // reading from here
bytes int64 // number of bytes read on this connection
eof bool // whether we have read end of file
}
// Read bytes from the object - see io.Reader
func (file *openFile) Read(p []byte) (n int, err error) {
n, err = file.in.Read(p)
file.bytes += int64(n)
if err == io.EOF {
file.eof = true
}
return
}
// Close the object and update bytes read
func (file *openFile) Close() (err error) {
// If end of file, update bytes read
if file.eof {
// fs.Debug(file.o, "Updating size of doc after download to %v", file.bytes)
file.o.bytes = file.bytes
}
return file.in.Close()
}
// Check it satisfies the interfaces
var _ io.ReadCloser = &openFile{}
// Open an object for read
func (o *Object) Open() (in io.ReadCloser, err error) {
res, err := o.httpResponse("GET")
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
_ = res.Body.Close() // ignore error
return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
}
// If it is a document, update the size with what we are
// reading as it can change from the HEAD in the listing to
// this GET. This stops rclone marking the transfer as
// corrupted.
if o.isDocument {
return &openFile{o: o, in: res.Body}, nil
}
return res.Body, nil
}
@@ -916,9 +1041,15 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
size := src.Size()
modTime := src.ModTime()
if o.isDocument {
return fmt.Errorf("Can't update a google document")
}
updateInfo := &drive.File{
Id: o.id,
MimeType: fs.MimeType(o),
ModifiedDate: modTime.Format(timeFormatOut),
}
@@ -947,6 +1078,9 @@ func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
// Remove an object
func (o *Object) Remove() error {
if o.isDocument {
return fmt.Errorf("Can't delete a google document")
}
var err error
err = o.fs.pacer.Call(func() (bool, error) {
if *driveUseTrash {

View File

@@ -0,0 +1,59 @@
package drive
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"google.golang.org/api/drive/v2"
)
func TestInternalParseExtensions(t *testing.T) {
for _, test := range []struct {
in string
want []string
wantErr error
}{
{"doc", []string{"doc"}, nil},
{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
{"docx,svg,Docx", []string{"docx", "svg"}, nil},
{"docx,potato,docx", []string{"docx"}, fmt.Errorf(`Couldn't find mime type for extension "potato"`)},
} {
f := new(Fs)
gotErr := f.parseExtensions(test.in)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.want, f.extensions)
}
// Test it is appending
f := new(Fs)
assert.Nil(t, f.parseExtensions("docx,svg"))
assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
}
func TestInternalFindExportFormat(t *testing.T) {
item := new(drive.File)
item.ExportLinks = map[string]string{
"application/pdf": "http://pdf",
"application/rtf": "http://rtf",
}
for _, test := range []struct {
extensions []string
wantExtension string
wantLink string
}{
{[]string{}, "", ""},
{[]string{"pdf"}, "pdf", "http://pdf"},
{[]string{"pdf", "rtf", "xls"}, "pdf", "http://pdf"},
{[]string{"xls", "rtf", "pdf"}, "rtf", "http://rtf"},
{[]string{"xls", "csv", "svg"}, "", ""},
} {
f := new(Fs)
f.extensions = test.extensions
gotExtension, gotLink := f.findExportFormat("file", item)
assert.Equal(t, test.wantExtension, gotExtension)
assert.Equal(t, test.wantLink, gotLink)
}
}

View File

@@ -42,7 +42,7 @@ func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }

View File

@@ -27,9 +27,9 @@ import (
// Constants
const (
rcloneAppKey = "5jcck7diasz0rqy"
rcloneAppSecret = "m8WRxJ6b1Z/Y25fDwJWS"
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
rcloneAppKey = "5jcck7diasz0rqy"
rcloneEncryptedAppSecret = "m8WRxJ6b1Z/Y25fDwJWS"
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
)
var (
@@ -44,10 +44,11 @@ var (
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "dropbox",
NewFs: NewFs,
Config: configHelper,
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: configHelper,
Options: []fs.Option{{
Name: "app_key",
Help: "Dropbox App Key - leave blank normally.",
@@ -137,7 +138,7 @@ func newDropbox(name string) (*dropbox.Dropbox, error) {
}
appSecret := fs.ConfigFile.MustValue(name, "app_secret")
if appSecret == "" {
appSecret = fs.Reveal(rcloneAppSecret)
appSecret = fs.Reveal(rcloneEncryptedAppSecret)
}
err := db.SetAppInfo(appKey, appSecret)
@@ -379,13 +380,13 @@ func (rc *readCloser) Close() error {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
remote: src.Remote(),
}
return o, o.Update(in, modTime, size)
return o, o.Update(in, src)
}
// Mkdir creates the container if it doesn't exist
@@ -523,10 +524,15 @@ func (f *Fs) DirMove(src fs.Fs) error {
return nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashNone)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
func (o *Object) Fs() fs.Info {
return o.fs
}
@@ -543,9 +549,9 @@ func (o *Object) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
return "", nil
// Hash is unsupported on Dropbox
func (o *Object) Hash(t fs.HashType) (string, error) {
return "", fs.ErrHashUnsupported
}
// Size returns the size of an object in bytes
@@ -630,9 +636,9 @@ func (o *Object) ModTime() time.Time {
// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
func (o *Object) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) error {
// FIXME not implemented
return
return fs.ErrorCantSetModTime
}
// Storable returns whether this object is storable
@@ -651,10 +657,10 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
fs.ErrorLog(o, "File name disallowed - not uploading")
fs.Log(o, "File name disallowed - not uploading")
return nil
}
entry, err := o.fs.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")

View File

@@ -42,7 +42,7 @@ func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }

16
fs/all/all.go Normal file
View File

@@ -0,0 +1,16 @@
package all
import (
// Active file systems
_ "github.com/ncw/rclone/amazonclouddrive"
_ "github.com/ncw/rclone/b2"
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
_ "github.com/ncw/rclone/hubic"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/onedrive"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
_ "github.com/ncw/rclone/yandex"
)

View File

@@ -4,8 +4,14 @@ package fs
import (
"bufio"
"bytes"
"crypto/rand"
"crypto/sha256"
"crypto/tls"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"net/http"
@@ -16,16 +22,29 @@ import (
"strconv"
"strings"
"time"
"crypto/tls"
"unicode/utf8"
"github.com/Unknwon/goconfig"
"github.com/mreiferson/go-httpclient"
"github.com/spf13/pflag"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/text/unicode/norm"
)
const (
configFileName = ".rclone.conf"
// ConfigToken is the key used to store the token under
ConfigToken = "token"
// ConfigClientID is the config key used to store the client id
ConfigClientID = "client_id"
// ConfigClientSecret is the config key used to store the client secret
ConfigClientSecret = "client_secret"
// ConfigAutomatic indicates that we want non-interactive configuration
ConfigAutomatic = "config_automatic"
)
// SizeSuffix is parsed by flag with k/M/G suffixes
@@ -42,21 +61,35 @@ var (
// Config is the global config
Config = &ConfigInfo{}
// Flags
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
checkSum = pflag.BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
sizeOnly = pflag.BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
timeout = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
dumpHeaders = pflag.BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
dumpBodies = pflag.BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
skipVerify = pflag.BoolP("no-check-certificate", "", false, "Do not verify the server SSL certificate. Insecure.")
bwLimit SizeSuffix
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
checkSum = pflag.BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
sizeOnly = pflag.BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
ignoreTimes = pflag.BoolP("ignore-times", "I", false, "Don't skip files that match size and time - transfer all files")
ignoreExisting = pflag.BoolP("ignore-existing", "", false, "Skip all files that exist on destination")
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
timeout = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
dumpHeaders = pflag.BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
dumpBodies = pflag.BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
skipVerify = pflag.BoolP("no-check-certificate", "", false, "Do not verify the server SSL certificate. Insecure.")
AskPassword = pflag.BoolP("ask-password", "", true, "Allow prompt for password for encrypted configuration.")
deleteBefore = pflag.BoolP("delete-before", "", false, "When synchronizing, delete files on destination before transferring")
deleteDuring = pflag.BoolP("delete-during", "", false, "When synchronizing, delete files during transfer (default)")
deleteAfter = pflag.BoolP("delete-after", "", false, "When synchronizing, delete files on destination after transferring")
lowLevelRetries = pflag.IntP("low-level-retries", "", 10, "Number of low level retries to do.")
updateOlder = pflag.BoolP("update", "u", false, "Skip files that are newer on the destination.")
noGzip = pflag.BoolP("no-gzip-encoding", "", false, "Don't set Accept-Encoding: gzip.")
dedupeMode = pflag.StringP("dedupe-mode", "", "interactive", "Dedupe mode interactive|skip|first|newest|oldest|rename.")
bwLimit SizeSuffix
// Key to use for password en/decryption.
// When nil, no encryption will be used for saving.
configKey []byte
)
func init() {
@@ -156,6 +189,8 @@ type ConfigInfo struct {
DryRun bool
CheckSum bool
SizeOnly bool
IgnoreTimes bool
IgnoreExisting bool
ModifyWindow time.Duration
Checkers int
Transfers int
@@ -165,6 +200,13 @@ type ConfigInfo struct {
DumpBodies bool
Filter *Filter
InsecureSkipVerify bool // Skip server certificate verification
DeleteBefore bool // Delete before checking
DeleteDuring bool // Delete during checking/transfer
DeleteAfter bool // Delete after successful transfer.
LowLevelRetries int
UpdateOlder bool // Skip files that are newer on the destination
NoGzip bool // Disable compression
DedupeMode DeduplicateMode
}
// Transport returns an http.RoundTripper with the correct timeouts
@@ -199,6 +241,16 @@ func (ci *ConfigInfo) Transport() http.RoundTripper {
// In this mode, TLS is susceptible to man-in-the-middle attacks.
// This should be used only for testing.
TLSClientConfig: &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify},
// DisableCompression, if true, prevents the Transport from
// requesting compression with an "Accept-Encoding: gzip"
// request header when the Request contains no existing
// Accept-Encoding value. If the Transport requests gzip on
// its own and gets a gzipped response, it's transparently
// decoded in the Response.Body. However, if the user
// explicitly requested gzip it is not automatically
// uncompressed.
DisableCompression: *noGzip,
}
if ci.DumpHeaders || ci.DumpBodies {
return NewLoggedTransport(t, ci.DumpBodies)
@@ -249,21 +301,53 @@ func LoadConfig() {
Config.ConnectTimeout = *connectTimeout
Config.CheckSum = *checkSum
Config.SizeOnly = *sizeOnly
Config.IgnoreTimes = *ignoreTimes
Config.IgnoreExisting = *ignoreExisting
Config.DumpHeaders = *dumpHeaders
Config.DumpBodies = *dumpBodies
Config.InsecureSkipVerify = *skipVerify
Config.LowLevelRetries = *lowLevelRetries
Config.UpdateOlder = *updateOlder
Config.NoGzip = *noGzip
ConfigPath = *configFile
Config.DeleteBefore = *deleteBefore
Config.DeleteDuring = *deleteDuring
Config.DeleteAfter = *deleteAfter
switch strings.ToLower(*dedupeMode) {
case "interactive":
Config.DedupeMode = DeduplicateInteractive
case "skip":
Config.DedupeMode = DeduplicateSkip
case "first":
Config.DedupeMode = DeduplicateFirst
case "newest":
Config.DedupeMode = DeduplicateNewest
case "oldest":
Config.DedupeMode = DeduplicateOldest
case "rename":
Config.DedupeMode = DeduplicateRename
default:
log.Fatalf(`Unknown mode for --dedupe-mode %q.`, *dedupeMode)
}
switch {
case *deleteBefore && (*deleteDuring || *deleteAfter),
*deleteDuring && *deleteAfter:
log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`)
// If none are specified, use "during".
case !*deleteBefore && !*deleteDuring && !*deleteAfter:
Config.DeleteDuring = true
}
// Load configuration file.
var err error
ConfigFile, err = goconfig.LoadConfigFile(ConfigPath)
ConfigFile, err = loadConfigFile()
if err != nil {
log.Printf("Failed to load config file %v - using defaults: %v", ConfigPath, err)
ConfigFile, err = goconfig.LoadConfigFile(os.DevNull)
if err != nil {
log.Fatalf("Failed to read null config file: %v", err)
}
log.Fatalf("Failed to config file \"%s\": %v", ConfigPath, err)
}
// Load filters
@@ -276,12 +360,186 @@ func LoadConfig() {
startTokenBucket()
}
// loadConfigFile will load a config file, and
// automatically decrypt it.
func loadConfigFile() (*goconfig.ConfigFile, error) {
b, err := ioutil.ReadFile(ConfigPath)
if err != nil {
log.Printf("Failed to load config file \"%v\" - using defaults: %v", ConfigPath, err)
return goconfig.LoadFromReader(&bytes.Buffer{})
}
// Find first non-empty line
r := bufio.NewReader(bytes.NewBuffer(b))
for {
line, _, err := r.ReadLine()
if err != nil {
if err == io.EOF {
return goconfig.LoadFromReader(bytes.NewBuffer(b))
}
return nil, err
}
l := strings.TrimSpace(string(line))
if len(l) == 0 || strings.HasPrefix(l, ";") || strings.HasPrefix(l, "#") {
continue
}
// First non-empty or non-comment must be ENCRYPT_V0
if l == "RCLONE_ENCRYPT_V0:" {
break
}
if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") {
return nil, fmt.Errorf("Unsupported configuration encryption. Update rclone for support.")
}
return goconfig.LoadFromReader(bytes.NewBuffer(b))
}
// Encrypted content is base64 encoded.
dec := base64.NewDecoder(base64.StdEncoding, r)
box, err := ioutil.ReadAll(dec)
if err != nil {
return nil, fmt.Errorf("Failed to load base64 encoded data: %v", err)
}
if len(box) < 24+secretbox.Overhead {
return nil, fmt.Errorf("Configuration data too short")
}
envpw := os.Getenv("RCLONE_CONFIG_PASS")
var out []byte
for {
if len(configKey) == 0 && envpw != "" {
err := setPassword(envpw)
if err != nil {
fmt.Println("Using RCLONE_CONFIG_PASS returned:", err)
envpw = ""
} else {
Debug(nil, "Using RCLONE_CONFIG_PASS password.")
}
}
if len(configKey) == 0 {
if !*AskPassword {
return nil, fmt.Errorf("Unable to decrypt configuration and not allowed to ask for password. Set RCLONE_CONFIG_PASS to your configuration password.")
}
getPassword("Enter configuration password:")
}
// Nonce is first 24 bytes of the ciphertext
var nonce [24]byte
copy(nonce[:], box[:24])
var key [32]byte
copy(key[:], configKey[:32])
// Attempt to decrypt
var ok bool
out, ok = secretbox.Open(nil, box[24:], &nonce, &key)
if ok {
break
}
// Retry
log.Println("Couldn't decrypt configuration, most likely wrong password.")
configKey = nil
envpw = ""
}
return goconfig.LoadFromReader(bytes.NewBuffer(out))
}
// getPassword will query the user for a password the
// first time it is required.
func getPassword(q string) {
if len(configKey) != 0 {
return
}
for {
fmt.Println(q)
fmt.Print("password>")
err := setPassword(ReadPassword())
if err == nil {
return
}
fmt.Println("Error:", err)
}
}
// setPassword will set the configKey to the hash of
// the password. If the length of the password is
// zero after trimming+normalization, an error is returned.
func setPassword(password string) error {
if !utf8.ValidString(password) {
return fmt.Errorf("Password contains invalid utf8 characters")
}
// Remove leading+trailing whitespace
password = strings.TrimSpace(password)
// Normalize to reduce weird variations.
password = norm.NFKC.String(password)
if len(password) == 0 {
return fmt.Errorf("No characters in password")
}
// Create SHA256 hash of the password
sha := sha256.New()
_, err := sha.Write([]byte("[" + password + "][rclone-config]"))
if err != nil {
return err
}
configKey = sha.Sum(nil)
return nil
}
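A minimal sketch of the derivation above, assuming only the "[password][rclone-config]" framing shown; deriveKey is an illustrative name, not rclone API. It demonstrates that trimming and NFKC normalization happen before hashing, so visually equivalent passwords yield the same key:

package main

import (
    "crypto/sha256"
    "fmt"
    "strings"

    "golang.org/x/text/unicode/norm"
)

// deriveKey mirrors the trim + NFKC + SHA-256 steps above.
func deriveKey(password string) [32]byte {
    password = norm.NFKC.String(strings.TrimSpace(password))
    // SHA-256 yields exactly the 32 bytes secretbox needs for a key.
    return sha256.Sum256([]byte("[" + password + "][rclone-config]"))
}

func main() {
    a := deriveKey(" hunter2 \t")
    b := deriveKey("hunter2")
    fmt.Println(a == b) // true - whitespace is trimmed before hashing
}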
// SaveConfig saves configuration file.
// if configKey has been set, the file will be encrypted.
func SaveConfig() {
err := goconfig.SaveConfigFile(ConfigFile, ConfigPath)
if len(configKey) == 0 {
err := goconfig.SaveConfigFile(ConfigFile, ConfigPath)
if err != nil {
log.Fatalf("Failed to save config file: %v", err)
}
err = os.Chmod(ConfigPath, 0600)
if err != nil {
log.Printf("Failed to set permissions on config file: %v", err)
}
return
}
var buf bytes.Buffer
err := goconfig.SaveConfigData(ConfigFile, &buf)
if err != nil {
log.Fatalf("Failed to save config file: %v", err)
}
f, err := os.Create(ConfigPath)
if err != nil {
log.Fatalf("Failed to save config file: %v", err)
}
fmt.Fprintln(f, "# Encrypted rclone configuration File")
fmt.Fprintln(f, "")
fmt.Fprintln(f, "RCLONE_ENCRYPT_V0:")
// Generate new nonce and write it to the start of the ciphertext
var nonce [24]byte
n, _ := rand.Read(nonce[:])
if n != 24 {
log.Fatalf("nonce short read: %d", n)
}
enc := base64.NewEncoder(base64.StdEncoding, f)
_, err = enc.Write(nonce[:])
if err != nil {
log.Fatalf("Failed to write config file: %v", err)
}
var key [32]byte
copy(key[:], configKey[:32])
b := secretbox.Seal(nil, buf.Bytes(), &nonce, &key)
_, err = enc.Write(b)
if err != nil {
log.Fatalf("Failed to write config file: %v", err)
}
_ = enc.Close()
err = f.Close()
if err != nil {
log.Fatalf("Failed to close config file: %v", err)
}
err = os.Chmod(ConfigPath, 0600)
if err != nil {
log.Printf("Failed to set permissions on config file: %v", err)
@@ -354,13 +612,34 @@ func Choose(what string, defaults, help []string, newOk bool) string {
}
fmt.Println()
for i, text := range defaults {
var lines []string
if help != nil {
parts := strings.Split(help[i], "\n")
for _, part := range parts {
fmt.Printf(" * %s\n", part)
lines = append(lines, parts...)
}
lines = append(lines, fmt.Sprintf("%q", text))
pos := i + 1
if len(lines) == 1 {
fmt.Printf("%2d > %s\n", pos, text)
} else {
mid := (len(lines) - 1) / 2
for i, line := range lines {
var sep rune
switch i {
case 0:
sep = '/'
case len(lines) - 1:
sep = '\\'
default:
sep = '|'
}
number := " "
if i == mid {
number = fmt.Sprintf("%2d", pos)
}
fmt.Printf("%s %c %s\n", number, sep, line)
}
}
fmt.Printf("%2d) %s\n", i+1, text)
}
for {
fmt.Printf("%s> ", what)
@@ -378,6 +657,25 @@ func Choose(what string, defaults, help []string, newOk bool) string {
}
}
// ChooseNumber asks the user to enter a number between min and max
// inclusive prompting them with what.
func ChooseNumber(what string, min, max int) int {
for {
fmt.Printf("%s> ", what)
result := ReadLine()
i, err := strconv.Atoi(result)
if err != nil {
fmt.Printf("Bad number: %v\n", err)
continue
}
if i < min || i > max {
fmt.Printf("Out of range - %d to %d inclusive\n", min, max)
continue
}
return i
}
}
// ShowRemote shows the contents of the remote
func ShowRemote(name string) {
fmt.Printf("--------------------\n")
@@ -437,14 +735,26 @@ func ChooseOption(o *Option) string {
return ReadLine()
}
// fsOption returns an Option describing the possible remotes
func fsOption() *Option {
o := &Option{
Name: "Storage",
Help: "Type of storage to configure.",
}
for _, item := range fsRegistry {
example := OptionExample{
Value: item.Name,
Help: item.Description,
}
o.Examples = append(o.Examples, example)
}
o.Examples.Sort()
return o
}
// NewRemote make a new remote from its name
func NewRemote(name string) {
fmt.Printf("What type of source is it?\n")
types := []string{}
for _, item := range fsRegistry {
types = append(types, item.Name)
}
newType := Choose("type", types, nil, false)
newType := ChooseOption(fsOption())
ConfigFile.SetValue(name, "type", newType)
fs, err := Find(newType)
if err != nil {
@@ -493,14 +803,14 @@ func DeleteRemote(name string) {
func EditConfig() {
for {
haveRemotes := len(ConfigFile.GetSectionList()) != 0
what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "qQuit config"}
what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "sSet configuration password", "qQuit config"}
if haveRemotes {
fmt.Printf("Current remotes:\n\n")
ShowRemotes()
fmt.Printf("\n")
} else {
fmt.Printf("No remotes found - make a new one\n")
what = append(what[1:2], what[3])
what = append(what[1:2], what[3:]...)
}
switch i := Command(what); i {
case 'e':
@@ -527,8 +837,104 @@ func EditConfig() {
case 'd':
name := ChooseRemote()
DeleteRemote(name)
case 's':
SetPassword()
case 'q':
return
}
}
}
// SetPassword will allow the user to modify the current
// configuration encryption settings.
func SetPassword() {
for {
if len(configKey) > 0 {
fmt.Println("Your configuration is encrypted.")
what := []string{"cChange Password", "uUnencrypt configuration", "qQuit to main menu"}
switch i := Command(what); i {
case 'c':
changePassword()
SaveConfig()
fmt.Println("Password changed")
continue
case 'u':
configKey = nil
SaveConfig()
continue
case 'q':
return
}
} else {
fmt.Println("Your configuration is not encrypted.")
fmt.Println("If you add a password, you will protect your login information to cloud services.")
what := []string{"aAdd Password", "qQuit to main menu"}
switch i := Command(what); i {
case 'a':
changePassword()
SaveConfig()
fmt.Println("Password set")
continue
case 'q':
return
}
}
}
}
// changePassword will query the user twice
// for a password. If the same password is entered
// twice the key is updated.
func changePassword() {
for {
configKey = nil
getPassword("Enter NEW configuration password:")
a := configKey
// re-enter password
configKey = nil
getPassword("Confirm NEW password:")
b := configKey
if bytes.Equal(a, b) {
return
}
fmt.Println("Passwords does not match!")
}
}
// Authorize is for remote authorization of headless machines.
//
// It expects 1 or 3 arguments
//
// rclone authorize "fs name"
// rclone authorize "fs name" "client id" "client secret"
func Authorize(args []string) {
switch len(args) {
case 1, 3:
default:
log.Fatalf("Invalid number of arguments: %d", len(args))
}
newType := args[0]
fs, err := Find(newType)
if err != nil {
log.Fatalf("Failed to find fs: %v", err)
}
if fs.Config == nil {
log.Fatalf("Can't authorize fs %q", newType)
}
// Name used for temporary fs
name := "**temp-fs**"
// Make sure we delete it
defer DeleteRemote(name)
// Indicate that we want fully automatic configuration.
ConfigFile.SetValue(name, ConfigAutomatic, "yes")
if len(args) == 3 {
ConfigFile.SetValue(name, ConfigClientID, args[1])
ConfigFile.SetValue(name, ConfigClientSecret, args[2])
}
fs.Config(name)
}


@@ -0,0 +1,26 @@
// ReadPassword for OSes which are supported by golang.org/x/crypto/ssh/terminal
// See https://github.com/golang/go/issues/14441 - plan9
// https://github.com/golang/go/issues/13085 - solaris
// +build !solaris,!plan9
package fs
import (
"fmt"
"log"
"os"
"strings"
"golang.org/x/crypto/ssh/terminal"
)
// ReadPassword reads a password without echoing it to the terminal.
func ReadPassword() string {
line, err := terminal.ReadPassword(int(os.Stdin.Fd()))
fmt.Println("")
if err != nil {
log.Fatalf("Failed to read password: %v", err)
}
return strings.TrimSpace(string(line))
}


@@ -0,0 +1,12 @@
// ReadPassword for OSes which are not supported by golang.org/x/crypto/ssh/terminal
// See https://github.com/golang/go/issues/14441 - plan9
// https://github.com/golang/go/issues/13085 - solaris
// +build solaris plan9
package fs
// ReadPassword reads a password, echoing it to the terminal.
func ReadPassword() string {
return ReadLine()
}


@@ -1,6 +1,10 @@
package fs
import "testing"
import (
"bytes"
"reflect"
"testing"
)
func TestSizeSuffixString(t *testing.T) {
for _, test := range []struct {
@@ -73,3 +77,150 @@ func TestReveal(t *testing.T) {
}
}
}
func TestConfigLoad(t *testing.T) {
oldConfigPath := ConfigPath
ConfigPath = "./testdata/plain.conf"
defer func() {
ConfigPath = oldConfigPath
}()
configKey = nil // reset password
c, err := loadConfigFile()
if err != nil {
t.Fatal(err)
}
sections := c.GetSectionList()
var expect = []string{"RCLONE_ENCRYPT_V0", "nounc", "unc"}
if !reflect.DeepEqual(sections, expect) {
t.Fatalf("%v != %v", sections, expect)
}
keys := c.GetKeyList("nounc")
expect = []string{"type", "nounc"}
if !reflect.DeepEqual(keys, expect) {
t.Fatalf("%v != %v", keys, expect)
}
}
func TestConfigLoadEncrypted(t *testing.T) {
var err error
oldConfigPath := ConfigPath
ConfigPath = "./testdata/encrypted.conf"
defer func() {
ConfigPath = oldConfigPath
configKey = nil // reset password
}()
// Set correct password
err = setPassword("asdf")
if err != nil {
t.Fatal(err)
}
c, err := loadConfigFile()
if err != nil {
t.Fatal(err)
}
sections := c.GetSectionList()
var expect = []string{"nounc", "unc"}
if !reflect.DeepEqual(sections, expect) {
t.Fatalf("%v != %v", sections, expect)
}
keys := c.GetKeyList("nounc")
expect = []string{"type", "nounc"}
if !reflect.DeepEqual(keys, expect) {
t.Fatalf("%v != %v", keys, expect)
}
}
func TestConfigLoadEncryptedFailures(t *testing.T) {
var err error
// This file should be too short to be decoded.
oldConfigPath := ConfigPath
ConfigPath = "./testdata/enc-short.conf"
defer func() { ConfigPath = oldConfigPath }()
_, err = loadConfigFile()
if err == nil {
t.Fatal("expected error")
}
t.Log("Correctly got:", err)
// This file contains invalid base64 characters.
ConfigPath = "./testdata/enc-invalid.conf"
_, err = loadConfigFile()
if err == nil {
t.Fatal("expected error")
}
t.Log("Correctly got:", err)
// This file claims a newer, unsupported encryption version.
ConfigPath = "./testdata/enc-too-new.conf"
_, err = loadConfigFile()
if err == nil {
t.Fatal("expected error")
}
t.Log("Correctly got:", err)
// This file does not exist, so loading should fall back to an empty config.
ConfigPath = "./testdata/filenotfound.conf"
c, err := loadConfigFile()
if err != nil {
t.Fatal(err)
}
if len(c.GetSectionList()) != 0 {
t.Fatalf("Expected 0-length section, got %d entries", len(c.GetSectionList()))
}
}
func TestPassword(t *testing.T) {
defer func() {
configKey = nil // reset password
}()
var err error
// Empty password should give error
err = setPassword(" \t ")
if err == nil {
t.Fatal("expected error")
}
// Test invalid utf8 sequence
err = setPassword(string([]byte{0xff, 0xfe, 0xfd}) + "abc")
if err == nil {
t.Fatal("expected error")
}
// Simple check of wrong passwords
hashedKeyCompare(t, "mis", "match", false)
// Check that passwords match with trimmed whitespace
hashedKeyCompare(t, " abcdef \t", "abcdef", true)
// Check that passwords match after unicode normalization
hashedKeyCompare(t, "ff\u0041\u030A", "ffÅ", true)
// Check that passwords preserves case
hashedKeyCompare(t, "abcdef", "ABCDEF", false)
}
func hashedKeyCompare(t *testing.T, a, b string, shouldMatch bool) {
err := setPassword(a)
if err != nil {
t.Fatal(err)
}
k1 := configKey
err = setPassword(b)
if err != nil {
t.Fatal(err)
}
k2 := configKey
matches := bytes.Equal(k1, k2)
if shouldMatch && !matches {
t.Fatalf("%v != %v", k1, k2)
}
if !shouldMatch && matches {
t.Fatalf("%v == %v", k1, k2)
}
}


@@ -7,7 +7,9 @@ import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/spf13/pflag"
)
@@ -23,6 +25,8 @@ var (
includeRule = pflag.StringP("include", "", "", "Include files matching pattern")
includeFrom = pflag.StringP("include-from", "", "", "Read include patterns from file")
filesFrom = pflag.StringP("files-from", "", "", "Read list of source-file names from file")
minAge = pflag.StringP("min-age", "", "", "Don't transfer any file younger than this in s or suffix ms|s|m|h|d|w|M|y")
maxAge = pflag.StringP("max-age", "", "", "Don't transfer any file older than this in s or suffix ms|s|m|h|d|w|M|y")
minSize SizeSuffix
maxSize SizeSuffix
dumpFilters = pflag.BoolP("dump-filters", "", false, "Dump the filters to the output")
@@ -62,10 +66,50 @@ type Filter struct {
DeleteExcluded bool
MinSize int64
MaxSize int64
ModTimeFrom time.Time
ModTimeTo time.Time
rules []rule
files filesMap
}
// ageSuffixes maps each age suffix to its duration multiplier
var ageSuffixes = []struct {
Suffix string
Multiplier time.Duration
}{
{Suffix: "ms", Multiplier: time.Millisecond},
{Suffix: "s", Multiplier: time.Second},
{Suffix: "m", Multiplier: time.Minute},
{Suffix: "h", Multiplier: time.Hour},
{Suffix: "d", Multiplier: time.Hour * 24},
{Suffix: "w", Multiplier: time.Hour * 24 * 7},
{Suffix: "M", Multiplier: time.Hour * 24 * 30},
{Suffix: "y", Multiplier: time.Hour * 24 * 365},
// Default to second
{Suffix: "", Multiplier: time.Second},
}
// ParseDuration parses a duration string. Accepts ms|s|m|h|d|w|M|y suffixes. Defaults to seconds if no suffix is given.
func ParseDuration(age string) (time.Duration, error) {
var period float64
for _, ageSuffix := range ageSuffixes {
if strings.HasSuffix(age, ageSuffix.Suffix) {
numberString := age[:len(age)-len(ageSuffix.Suffix)]
var err error
period, err = strconv.ParseFloat(numberString, 64)
if err != nil {
return time.Duration(0), err
}
period *= float64(ageSuffix.Multiplier)
break
}
}
return time.Duration(period), nil
}
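A quick usage sketch, assuming the exported fs.ParseDuration above; a bare number falls through to the default seconds multiplier:

package main

import (
    "fmt"

    "github.com/ncw/rclone/fs"
)

func main() {
    for _, in := range []string{"90", "1.5d", "2w", "250ms"} {
        d, err := fs.ParseDuration(in)
        fmt.Println(in, "->", d, err)
    }
    // 90 -> 1m30s, 1.5d -> 36h0m0s, 2w -> 336h0m0s, 250ms -> 250ms
}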
// NewFilter parses the command line options and creates a Filter object
func NewFilter() (f *Filter, err error) {
f = &Filter{
@@ -73,16 +117,14 @@ func NewFilter() (f *Filter, err error) {
MinSize: int64(minSize),
MaxSize: int64(maxSize),
}
addImplicitExclude := false
if *includeRule != "" {
err = f.Add(true, *includeRule)
if err != nil {
return nil, err
}
// Add implicit exclude
err = f.Add(false, "*")
if err != nil {
return nil, err
}
addImplicitExclude = true
}
if *includeFrom != "" {
err := forEachLine(*includeFrom, func(line string) error {
@@ -91,11 +133,7 @@ func NewFilter() (f *Filter, err error) {
if err != nil {
return nil, err
}
// Add implicit exclude
err = f.Add(false, "*")
if err != nil {
return nil, err
}
addImplicitExclude = true
}
if *excludeRule != "" {
err = f.Add(false, *excludeRule)
@@ -131,6 +169,31 @@ func NewFilter() (f *Filter, err error) {
return nil, err
}
}
if addImplicitExclude {
err = f.Add(false, "*")
if err != nil {
return nil, err
}
}
if *minAge != "" {
duration, err := ParseDuration(*minAge)
if err != nil {
return nil, err
}
f.ModTimeTo = time.Now().Add(-duration)
Debug(nil, "--min-age %v to %v", duration, f.ModTimeTo)
}
if *maxAge != "" {
duration, err := ParseDuration(*maxAge)
if err != nil {
return nil, err
}
f.ModTimeFrom = time.Now().Add(-duration)
if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) {
return nil, fmt.Errorf("Argument --min-age can't be larger than --max-age")
}
Debug(nil, "--max-age %v to %v", duration, f.ModTimeFrom)
}
if *dumpFilters {
fmt.Println("--- start filters ---")
fmt.Println(f.DumpFilters())
@@ -192,14 +255,30 @@ func (f *Filter) Clear() {
f.rules = nil
}
// InActive returns true if no filters are active
func (f *Filter) InActive() bool {
return (f.files == nil &&
f.ModTimeFrom.IsZero() &&
f.ModTimeTo.IsZero() &&
f.MinSize == 0 &&
f.MaxSize == 0 &&
len(f.rules) == 0)
}
// Include returns whether this object should be included into the
// sync or not
func (f *Filter) Include(remote string, size int64) bool {
func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
// filesFrom takes precedence
if f.files != nil {
_, include := f.files[remote]
return include
}
if !f.ModTimeFrom.IsZero() && modTime.Before(f.ModTimeFrom) {
return false
}
if !f.ModTimeTo.IsZero() && modTime.After(f.ModTimeTo) {
return false
}
if f.MinSize != 0 && size < f.MinSize {
return false
}
@@ -214,6 +293,21 @@ func (f *Filter) Include(remote string, size int64) bool {
return true
}
// IncludeObject returns whether this object should be included into
// the sync or not. This is a convenience function to avoid calling
// o.ModTime(), which is an expensive operation.
func (f *Filter) IncludeObject(o Object) bool {
var modTime time.Time
if !f.ModTimeFrom.IsZero() || !f.ModTimeTo.IsZero() {
modTime = o.ModTime()
} else {
modTime = time.Unix(0, 0)
}
return f.Include(o.Remote(), o.Size(), modTime)
}
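To make the flag-to-window mapping concrete, a small stdlib-only sketch with illustrative names: --min-age sets the upper bound ModTimeTo and --max-age the lower bound ModTimeFrom, and a file is included only when its mtime lies inside [ModTimeFrom, ModTimeTo]:

package main

import (
    "fmt"
    "time"
)

func main() {
    now := time.Now()
    modTimeTo := now.Add(-24 * time.Hour)       // --min-age 1d: newest acceptable mtime
    modTimeFrom := now.Add(-7 * 24 * time.Hour) // --max-age 1w: oldest acceptable mtime

    for _, age := range []time.Duration{time.Hour, 3 * 24 * time.Hour, 30 * 24 * time.Hour} {
        mt := now.Add(-age)
        included := !mt.Before(modTimeFrom) && !mt.After(modTimeTo)
        fmt.Printf("file aged %v included: %v\n", age, included)
    }
    // 1h0m0s false (too young), 72h0m0s true, 720h0m0s false (too old)
}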
// forEachLine calls fn on every line in the file pointed to by path
//
// It ignores empty lines and lines starting with '#' or ';'
@@ -222,7 +316,7 @@ func forEachLine(path string, fn func(string) error) (err error) {
if err != nil {
return err
}
defer checkClose(in, &err)
defer CheckClose(in, &err)
scanner := bufio.NewScanner(in)
for scanner.Scan() {
line := scanner.Text()
@@ -241,6 +335,12 @@ func forEachLine(path string, fn func(string) error) (err error) {
// DumpFilters dumps the filters in textual form, 1 per line
func (f *Filter) DumpFilters() string {
rules := []string{}
if !f.ModTimeFrom.IsZero() {
rules = append(rules, fmt.Sprintf("Last-modified date must be equal or greater than: %s", f.ModTimeFrom.String()))
}
if !f.ModTimeTo.IsZero() {
rules = append(rules, fmt.Sprintf("Last-modified date must be equal or less than: %s", f.ModTimeTo.String()))
}
for _, rule := range f.rules {
rules = append(rules, rule.String())
}


@@ -5,8 +5,43 @@ import (
"os"
"strings"
"testing"
"time"
)
func TestAgeSuffix(t *testing.T) {
for i, test := range []struct {
in string
want float64
err bool
}{
{"0", 0, false},
{"", 0, true},
{"1ms", float64(time.Millisecond), false},
{"1s", float64(time.Second), false},
{"1m", float64(time.Minute), false},
{"1h", float64(time.Hour), false},
{"1d", float64(time.Hour) * 24, false},
{"1w", float64(time.Hour) * 24 * 7, false},
{"1M", float64(time.Hour) * 24 * 30, false},
{"1y", float64(time.Hour) * 24 * 365, false},
{"1.5y", float64(time.Hour) * 24 * 365 * 1.5, false},
{"-1s", -float64(time.Second), false},
{"1.s", float64(time.Second), false},
{"1x", 0, true},
} {
duration, err := ParseDuration(test.in)
if (err != nil) != test.err {
t.Errorf("%d: Expecting error %v but got error %v", i, test.err, err)
continue
}
got := float64(duration)
if test.want != got {
t.Errorf("%d: Want %v got %v", i, test.want, got)
}
}
}
func TestNewFilterDefault(t *testing.T) {
f, err := NewFilter()
if err != nil {
@@ -27,6 +62,9 @@ func TestNewFilterDefault(t *testing.T) {
if f.files != nil {
t.Errorf("files want none got %v", f.files)
}
if !f.InActive() {
t.Errorf("want InActive")
}
}
// return a pointer to the string
@@ -112,16 +150,15 @@ func TestNewFilterFull(t *testing.T) {
}
got := f.DumpFilters()
want := `+ (^|/)include1$
- (^|/)[^/]*$
+ (^|/)include2$
+ (^|/)include3$
- (^|/)[^/]*$
- (^|/)exclude1$
- (^|/)exclude2$
- (^|/)exclude3$
- (^|/)filter1$
+ (^|/)filter2$
- (^|/)filter3$`
- (^|/)filter3$
- (^|/)[^/]*$`
if got != want {
t.Errorf("rules want %s got %s", want, got)
}
@@ -134,19 +171,23 @@ func TestNewFilterFull(t *testing.T) {
t.Errorf("Didn't find file %q in f.files", name)
}
}
if f.InActive() {
t.Errorf("want !InActive")
}
}
type includeTest struct {
in string
size int64
want bool
in string
size int64
modTime int64
want bool
}
func testInclude(t *testing.T, f *Filter, tests []includeTest) {
for _, test := range tests {
got := f.Include(test.in, test.size)
got := f.Include(test.in, test.size, time.Unix(test.modTime, 0))
if test.want != got {
t.Errorf("%q,%d: want %v got %v", test.in, test.size, test.want, got)
t.Errorf("%q,%d,%d: want %v got %v", test.in, test.size, test.modTime, test.want, got)
}
}
}
@@ -165,11 +206,14 @@ func TestNewFilterIncludeFiles(t *testing.T) {
t.Error(err)
}
testInclude(t, f, []includeTest{
{"file1.jpg", 0, true},
{"file2.jpg", 1, true},
{"potato/file2.jpg", 2, false},
{"file3.jpg", 3, false},
{"file1.jpg", 0, 0, true},
{"file2.jpg", 1, 0, true},
{"potato/file2.jpg", 2, 0, false},
{"file3.jpg", 3, 0, false},
})
if f.InActive() {
t.Errorf("want !InActive")
}
}
func TestNewFilterMinSize(t *testing.T) {
@@ -179,10 +223,13 @@ func TestNewFilterMinSize(t *testing.T) {
}
f.MinSize = 100
testInclude(t, f, []includeTest{
{"file1.jpg", 100, true},
{"file2.jpg", 101, true},
{"potato/file2.jpg", 99, false},
{"file1.jpg", 100, 0, true},
{"file2.jpg", 101, 0, true},
{"potato/file2.jpg", 99, 0, false},
})
if f.InActive() {
t.Errorf("want !InActive")
}
}
func TestNewFilterMaxSize(t *testing.T) {
@@ -192,10 +239,68 @@ func TestNewFilterMaxSize(t *testing.T) {
}
f.MaxSize = 100
testInclude(t, f, []includeTest{
{"file1.jpg", 100, true},
{"file2.jpg", 101, false},
{"potato/file2.jpg", 99, true},
{"file1.jpg", 100, 0, true},
{"file2.jpg", 101, 0, false},
{"potato/file2.jpg", 99, 0, true},
})
if f.InActive() {
t.Errorf("want !InActive")
}
}
func TestNewFilterMinAndMaxAge(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
f.ModTimeFrom = time.Unix(1440000002, 0)
f.ModTimeTo = time.Unix(1440000003, 0)
testInclude(t, f, []includeTest{
{"file1.jpg", 100, 1440000000, false},
{"file2.jpg", 101, 1440000001, false},
{"file3.jpg", 102, 1440000002, true},
{"potato/file1.jpg", 98, 1440000003, true},
{"potato/file2.jpg", 99, 1440000004, false},
})
if f.InActive() {
t.Errorf("want !InActive")
}
}
func TestNewFilterMinAge(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
f.ModTimeTo = time.Unix(1440000002, 0)
testInclude(t, f, []includeTest{
{"file1.jpg", 100, 1440000000, true},
{"file2.jpg", 101, 1440000001, true},
{"file3.jpg", 102, 1440000002, true},
{"potato/file1.jpg", 98, 1440000003, false},
{"potato/file2.jpg", 99, 1440000004, false},
})
if f.InActive() {
t.Errorf("want !InActive")
}
}
func TestNewFilterMaxAge(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
f.ModTimeFrom = time.Unix(1440000002, 0)
testInclude(t, f, []includeTest{
{"file1.jpg", 100, 1440000000, false},
{"file2.jpg", 101, 1440000001, false},
{"file3.jpg", 102, 1440000002, true},
{"potato/file1.jpg", 98, 1440000003, true},
{"potato/file2.jpg", 99, 1440000004, true},
})
if f.InActive() {
t.Errorf("want !InActive")
}
}
func TestNewFilterMatches(t *testing.T) {
@@ -221,20 +326,23 @@ func TestNewFilterMatches(t *testing.T) {
add("+ /sausage3**")
add("- *")
testInclude(t, f, []includeTest{
{"cleared", 100, false},
{"file1.jpg", 100, false},
{"file2.png", 100, true},
{"afile2.png", 100, false},
{"file3.jpg", 101, true},
{"file4.png", 101, false},
{"potato", 101, false},
{"sausage1", 101, true},
{"sausage1/potato", 101, false},
{"sausage2potato", 101, true},
{"sausage2/potato", 101, false},
{"sausage3/potato", 101, true},
{"unicorn", 99, false},
{"cleared", 100, 0, false},
{"file1.jpg", 100, 0, false},
{"file2.png", 100, 0, true},
{"afile2.png", 100, 0, false},
{"file3.jpg", 101, 0, true},
{"file4.png", 101, 0, false},
{"potato", 101, 0, false},
{"sausage1", 101, 0, true},
{"sausage1/potato", 101, 0, false},
{"sausage2potato", 101, 0, true},
{"sausage2/potato", 101, 0, false},
{"sausage3/potato", 101, 0, true},
{"unicorn", 99, 0, false},
})
if f.InActive() {
t.Errorf("want !InActive")
}
}
func TestFilterForEachLine(t *testing.T) {
@@ -316,7 +424,7 @@ func TestFilterMatchesFromDocs(t *testing.T) {
if err != nil {
t.Fatal(err)
}
included := f.Include(test.file, 0)
included := f.Include(test.file, 0, time.Unix(0, 0))
if included != test.included {
t.Logf("%q match %q: want %v got %v", test.glob, test.file, test.included, included)
}

fs/fs.go

@@ -7,6 +7,7 @@ import (
"log"
"path/filepath"
"regexp"
"sort"
"time"
)
@@ -22,20 +23,23 @@ const (
// Globals
var (
// Filesystem registry
fsRegistry []*Info
fsRegistry []*RegInfo
// ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
ErrorNotFoundInConfigFile = fmt.Errorf("Didn't find section in config file")
ErrorCantPurge = fmt.Errorf("Can't purge directory")
ErrorCantCopy = fmt.Errorf("Can't copy object - incompatible remotes")
ErrorCantMove = fmt.Errorf("Can't copy object - incompatible remotes")
ErrorCantDirMove = fmt.Errorf("Can't copy directory - incompatible remotes")
ErrorCantMove = fmt.Errorf("Can't move object - incompatible remotes")
ErrorCantDirMove = fmt.Errorf("Can't move directory - incompatible remotes")
ErrorDirExists = fmt.Errorf("Can't copy directory - destination already exists")
ErrorCantSetModTime = fmt.Errorf("Can't set modified time")
)
// Info information about a filesystem
type Info struct {
// RegInfo provides information about a filesystem
type RegInfo struct {
// Name of this fs
Name string
// Description of this fs - defaults to Name
Description string
// Create a new file system. If root refers to an existing
// object, then it should return a Fs which only returns that
// object.
@@ -51,9 +55,24 @@ type Option struct {
Name string
Help string
Optional bool
Examples []OptionExample
Examples OptionExamples
}
// OptionExamples is a slice of examples
type OptionExamples []OptionExample
// Len is part of sort.Interface.
func (os OptionExamples) Len() int { return len(os) }
// Swap is part of sort.Interface.
func (os OptionExamples) Swap(i, j int) { os[i], os[j] = os[j], os[i] }
// Less is part of sort.Interface.
func (os OptionExamples) Less(i, j int) bool { return os[i].Help < os[j].Help }
// Sort sorts an OptionExamples
func (os OptionExamples) Sort() { sort.Sort(os) }
// OptionExample describes an example for an Option
type OptionExample struct {
Value string
@@ -63,20 +82,13 @@ type OptionExample struct {
// Register a filesystem
//
// Fs modules should use this in an init() function
func Register(info *Info) {
func Register(info *RegInfo) {
fsRegistry = append(fsRegistry, info)
}
// Fs is the interface a cloud storage system must provide
type Fs interface {
// Name of the remote (as passed into NewFs)
Name() string
// Root of the remote (as passed into NewFs)
Root() string
// String returns a description of the FS
String() string
Info
// List the Fs into a channel
List() ObjectsChan
@@ -92,7 +104,7 @@ type Fs interface {
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
Put(in io.Reader, remote string, modTime time.Time, size int64) (Object, error)
Put(in io.Reader, src ObjectInfo) (Object, error)
// Mkdir makes the directory (container, bucket)
//
@@ -103,49 +115,69 @@ type Fs interface {
//
// Return an error if it doesn't exist or isn't empty
Rmdir() error
}
// Info provides an interface to reading information about a filesystem.
type Info interface {
// Name of the remote (as passed into NewFs)
Name() string
// Root of the remote (as passed into NewFs)
Root() string
// String returns a description of the FS
String() string
// Precision of the ModTimes in this Fs
Precision() time.Duration
// Returns the supported hash types of the filesystem
Hashes() HashSet
}
// Object is a filesystem like object provided by an Fs
type Object interface {
ObjectInfo
// String returns a description of the Object
String() string
// Fs returns the Fs that this object is part of
Fs() Fs
// Remote returns the remote path
Remote() string
// Md5sum returns the md5 checksum of the file
// If no Md5sum is available it returns ""
Md5sum() (string, error)
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
ModTime() time.Time
// SetModTime sets the metadata on the object to set the modification date
SetModTime(time.Time)
// Size returns the size of the file
Size() int64
SetModTime(time.Time) error
// Open opens the file for read. Call Close() on the returned io.ReadCloser
Open() (io.ReadCloser, error)
// Update in to the object with the modTime given of the given size
Update(in io.Reader, modTime time.Time, size int64) error
// Storable says whether this object can be stored
Storable() bool
Update(in io.Reader, src ObjectInfo) error
// Removes this object
Remove() error
}
// ObjectInfo contains information about an object.
type ObjectInfo interface {
// Fs returns read only access to the Fs that this object is part of
Fs() Info
// Remote returns the remote path
Remote() string
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
Hash(HashType) (string, error)
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
ModTime() time.Time
// Size returns the size of the file
Size() int64
// Storable says whether this object can be stored
Storable() bool
}
// Purger is an optional interface for Fs
type Purger interface {
// Purge all files in the root and the root directory
@@ -233,7 +265,7 @@ type DirChan chan *Dir
// Find looks for an Info object for the name passed in
//
// Services are looked up in the config file
func Find(name string) (*Info, error) {
func Find(name string) (*RegInfo, error) {
for _, item := range fsRegistry {
if item.Name == name {
return item, nil
@@ -305,11 +337,57 @@ func ErrorLog(o interface{}, text string, args ...interface{}) {
OutputLog(o, text, args...)
}
// checkClose is a utility function used to check the return from
// CheckClose is a utility function used to check the return from
// Close in a defer statement.
func checkClose(c io.Closer, err *error) {
func CheckClose(c io.Closer, err *error) {
cerr := c.Close()
if *err == nil {
*err = cerr
}
}
// NewStaticObjectInfo returns a static ObjectInfo
// If hashes is nil and fs is not nil, the hash map will be replaced with
// empty hashes of the types supported by the fs.
func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable bool, hashes map[HashType]string, fs Info) ObjectInfo {
info := &staticObjectInfo{
remote: remote,
modTime: modTime,
size: size,
storable: storable,
hashes: hashes,
fs: fs,
}
if fs != nil && hashes == nil {
set := fs.Hashes().Array()
info.hashes = make(map[HashType]string)
for _, ht := range set {
info.hashes[ht] = ""
}
}
return info
}
type staticObjectInfo struct {
remote string
modTime time.Time
size int64
storable bool
hashes map[HashType]string
fs Info
}
func (i *staticObjectInfo) Fs() Info { return i.fs }
func (i *staticObjectInfo) Remote() string { return i.remote }
func (i *staticObjectInfo) ModTime() time.Time { return i.modTime }
func (i *staticObjectInfo) Size() int64 { return i.size }
func (i *staticObjectInfo) Storable() bool { return i.storable }
func (i *staticObjectInfo) Hash(h HashType) (string, error) {
if len(i.hashes) == 0 {
return "", ErrHashUnsupported
}
if hash, ok := i.hashes[h]; ok {
return hash, nil
}
return "", ErrHashUnsupported
}
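A hedged usage sketch of the new Put signature: wrap in-memory metadata in a static ObjectInfo and hand it to any Fs. "remote:bucket" is a placeholder for a configured remote, not a real one:

package main

import (
    "bytes"
    "log"
    "time"

    "github.com/ncw/rclone/fs"
)

func main() {
    f, err := fs.NewFs("remote:bucket") // placeholder remote
    if err != nil {
        log.Fatal(err)
    }
    data := []byte("hello world\n")
    // nil hashes with a non-nil fs fills in empty hashes of the
    // types the fs supports, as documented above.
    src := fs.NewStaticObjectInfo("hello.txt", time.Now(), int64(len(data)), true, nil, f)
    if _, err := f.Put(bytes.NewReader(data), src); err != nil {
        log.Fatal(err)
    }
}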

fs/hash.go

@@ -0,0 +1,241 @@
package fs
import (
"crypto/md5"
"crypto/sha1"
"encoding/hex"
"fmt"
"hash"
"io"
"strings"
)
// HashType indicates a standard hashing algorithm
type HashType int
// ErrHashUnsupported should be returned by a filesystem
// when it is asked for a hash type it does not support.
var ErrHashUnsupported = fmt.Errorf("hash type not supported")
const (
// HashMD5 indicates MD5 support
HashMD5 HashType = 1 << iota
// HashSHA1 indicates SHA-1 support
HashSHA1
// HashNone indicates no hashes are supported
HashNone HashType = 0
)
// SupportedHashes returns a set of all the supported hashes by
// HashStream and MultiHasher.
var SupportedHashes = NewHashSet(HashMD5, HashSHA1)
// HashWidth returns the width in characters for any HashType
var HashWidth = map[HashType]int{
HashMD5: 32,
HashSHA1: 40,
}
// HashStream will calculate hashes of all supported hash types.
func HashStream(r io.Reader) (map[HashType]string, error) {
return HashStreamTypes(r, SupportedHashes)
}
// HashStreamTypes will calculate hashes of the requested hash types.
func HashStreamTypes(r io.Reader, set HashSet) (map[HashType]string, error) {
hashers, err := hashFromTypes(set)
if err != nil {
return nil, err
}
_, err = io.Copy(hashToMultiWriter(hashers), r)
if err != nil {
return nil, err
}
var ret = make(map[HashType]string)
for k, v := range hashers {
ret[k] = hex.EncodeToString(v.Sum(nil))
}
return ret, nil
}
// String returns a string representation of the hash type.
// The function will panic if the hash type is unknown.
func (h HashType) String() string {
switch h {
case HashNone:
return "None"
case HashMD5:
return "MD5"
case HashSHA1:
return "SHA-1"
default:
err := fmt.Sprintf("internal error: unknown hash type: 0x%x", int(h))
panic(err)
}
}
// hashFromTypes will return hashers for all the requested types.
// The types must be a subset of SupportedHashes,
// and this function must support all types.
func hashFromTypes(set HashSet) (map[HashType]hash.Hash, error) {
if !set.SubsetOf(SupportedHashes) {
return nil, fmt.Errorf("Requested set %08x contains unknown hash types", int(set))
}
var hashers = make(map[HashType]hash.Hash)
types := set.Array()
for _, t := range types {
switch t {
case HashMD5:
hashers[t] = md5.New()
case HashSHA1:
hashers[t] = sha1.New()
default:
err := fmt.Sprintf("internal error: Unsupported hash type %v", t)
panic(err)
}
}
return hashers, nil
}
// hashToMultiWriter combines a set of hashers into a
// single multiwriter, where one write will update all
// the hashers.
func hashToMultiWriter(h map[HashType]hash.Hash) io.Writer {
// Convert the hasher map to a slice of writers
var w = make([]io.Writer, 0, len(h))
for _, v := range h {
w = append(w, v)
}
return io.MultiWriter(w...)
}
// A MultiHasher will construct various hashes on
// all incoming writes.
type MultiHasher struct {
io.Writer
h map[HashType]hash.Hash // Hashes
}
// NewMultiHasher will return a hash writer that will write all
// supported hash types.
func NewMultiHasher() *MultiHasher {
h, err := NewMultiHasherTypes(SupportedHashes)
if err != nil {
panic("internal error: could not create multihasher")
}
return h
}
// NewMultiHasherTypes will return a hash writer that will write
// the requested hash types.
func NewMultiHasherTypes(set HashSet) (*MultiHasher, error) {
hashers, err := hashFromTypes(set)
if err != nil {
return nil, err
}
m := MultiHasher{h: hashers, Writer: hashToMultiWriter(hashers)}
return &m, nil
}
// Sums returns the sums of all accumulated hashes as hex encoded
// strings.
func (m *MultiHasher) Sums() map[HashType]string {
dst := make(map[HashType]string)
for k, v := range m.h {
dst[k] = hex.EncodeToString(v.Sum(nil))
}
return dst
}
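A minimal usage sketch: feed a stream through a MultiHasher once and read back every supported digest:

package main

import (
    "fmt"
    "io"
    "log"
    "strings"

    "github.com/ncw/rclone/fs"
)

func main() {
    mh := fs.NewMultiHasher()
    if _, err := io.Copy(mh, strings.NewReader("The quick brown fox")); err != nil {
        log.Fatal(err)
    }
    for ht, sum := range mh.Sums() {
        fmt.Printf("%v: %s\n", ht, sum) // MD5 and SHA-1, hex encoded
    }
}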
// A HashSet Indicates one or more hash types.
type HashSet int
// NewHashSet will create a new hash set with the hash types supplied
func NewHashSet(t ...HashType) HashSet {
h := HashSet(HashNone)
return h.Add(t...)
}
// Add one or more hash types to the set.
// Returns the modified hash set.
func (h *HashSet) Add(t ...HashType) HashSet {
for _, v := range t {
*h |= HashSet(v)
}
return *h
}
// Contains returns true if the set contains the given hash type
func (h HashSet) Contains(t HashType) bool {
return int(h)&int(t) != 0
}
// Overlap returns the overlapping hash types
func (h HashSet) Overlap(t HashSet) HashSet {
return HashSet(int(h) & int(t))
}
// SubsetOf will return true if all types of h
// are present in the set c
func (h HashSet) SubsetOf(c HashSet) bool {
return int(h)|int(c) == int(c)
}
// GetOne will return a hash type.
// Currently the first is returned, but it could be
// improved to return the strongest.
func (h HashSet) GetOne() HashType {
v := int(h)
i := uint(0)
for v != 0 {
if v&1 != 0 {
return HashType(1 << i)
}
i++
v >>= 1
}
return HashType(HashNone)
}
// Array returns an array of all hash types in the set
func (h HashSet) Array() (ht []HashType) {
v := int(h)
i := uint(0)
for v != 0 {
if v&1 != 0 {
ht = append(ht, HashType(1<<i))
}
i++
v >>= 1
}
return ht
}
// Count returns the number of hash types in the set
func (h HashSet) Count() int {
if int(h) == 0 {
return 0
}
// credit: https://code.google.com/u/arnehormann/
x := uint64(h)
x -= (x >> 1) & 0x5555555555555555
x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
x += x >> 4
x &= 0x0f0f0f0f0f0f0f0f
x *= 0x0101010101010101
return int(x >> 56)
}
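Count is a branch-free SWAR popcount; this illustrative sketch cross-checks it against a naive bit loop for a few sets:

package main

import (
    "fmt"

    "github.com/ncw/rclone/fs"
)

// naiveCount counts set bits one at a time, for comparison.
func naiveCount(h fs.HashSet) int {
    n := 0
    for v := uint64(h); v != 0; v >>= 1 {
        n += int(v & 1)
    }
    return n
}

func main() {
    for _, h := range []fs.HashSet{
        fs.NewHashSet(),
        fs.NewHashSet(fs.HashMD5),
        fs.NewHashSet(fs.HashMD5, fs.HashSHA1),
    } {
        fmt.Println(h, h.Count(), naiveCount(h)) // the two counts agree
    }
}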
// String returns a string representation of the hash set.
// The function will panic if it contains an unknown type.
func (h HashSet) String() string {
a := h.Array()
var r []string
for _, v := range a {
r = append(r, v.String())
}
return "[" + strings.Join(r, ", ") + "]"
}

fs/hash_test.go

@@ -0,0 +1,260 @@
package fs_test
import (
"bytes"
"io"
"testing"
"github.com/ncw/rclone/fs"
)
func TestHashSet(t *testing.T) {
var h fs.HashSet
if h.Count() != 0 {
t.Fatalf("expected empty set to have 0 elements, got %d", h.Count())
}
a := h.Array()
if len(a) != 0 {
t.Fatalf("expected empty slice, got %d", len(a))
}
h = h.Add(fs.HashMD5)
if h.Count() != 1 {
t.Fatalf("expected 1 element, got %d", h.Count())
}
if h.GetOne() != fs.HashMD5 {
t.Fatalf("expected HashMD5, got %v", h.GetOne())
}
a = h.Array()
if len(a) != 1 {
t.Fatalf("expected 1 element, got %d", len(a))
}
if a[0] != fs.HashMD5 {
t.Fatalf("expected HashMD5, got %v", a[0])
}
// Test overlap, with all hashes
h = h.Overlap(fs.SupportedHashes)
if h.Count() != 1 {
t.Fatalf("expected 1 element, got %d", h.Count())
}
if h.GetOne() != fs.HashMD5 {
t.Fatalf("expected HashMD5, got %v", h.GetOne())
}
if !h.SubsetOf(fs.SupportedHashes) {
t.Fatalf("expected to be subset of all hashes")
}
if !h.SubsetOf(fs.NewHashSet(fs.HashMD5)) {
t.Fatalf("expected to be subset of itself")
}
h = h.Add(fs.HashSHA1)
if h.Count() != 2 {
t.Fatalf("expected 2 elements, got %d", h.Count())
}
one := h.GetOne()
if !(one == fs.HashMD5 || one == fs.HashSHA1) {
t.Fatalf("expected to be either MD5 or SHA1, got %v", one)
}
if !h.SubsetOf(fs.SupportedHashes) {
t.Fatalf("expected to be subset of all hashes")
}
if h.SubsetOf(fs.NewHashSet(fs.HashMD5)) {
t.Fatalf("did not expect to be subset of only MD5")
}
if h.SubsetOf(fs.NewHashSet(fs.HashSHA1)) {
t.Fatalf("did not expect to be subset of only SHA1")
}
if !h.SubsetOf(fs.NewHashSet(fs.HashMD5, fs.HashSHA1)) {
t.Fatalf("expected to be subset of MD5/SHA1")
}
a = h.Array()
if len(a) != 2 {
t.Fatalf("expected 2 elements, got %d", len(a))
}
ol := h.Overlap(fs.NewHashSet(fs.HashMD5))
if ol.Count() != 1 {
t.Fatalf("expected 1 element overlap, got %d", ol.Count())
}
if !ol.Contains(fs.HashMD5) {
t.Fatalf("expected overlap to be MD5, got %v", ol)
}
if ol.Contains(fs.HashSHA1) {
t.Fatalf("expected overlap NOT to contain SHA1, got %v", ol)
}
ol = h.Overlap(fs.NewHashSet(fs.HashMD5, fs.HashSHA1))
if ol.Count() != 2 {
t.Fatalf("expected 2 element overlap, got %d", ol.Count())
}
if !ol.Contains(fs.HashMD5) {
t.Fatalf("expected overlap to contain MD5, got %v", ol)
}
if !ol.Contains(fs.HashSHA1) {
t.Fatalf("expected overlap to contain SHA1, got %v", ol)
}
}
type hashTest struct {
input []byte
output map[fs.HashType]string
}
var hashTestSet = []hashTest{
{
input: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
output: map[fs.HashType]string{
fs.HashMD5: "bf13fc19e5151ac57d4252e0e0f87abe",
fs.HashSHA1: "3ab6543c08a75f292a5ecedac87ec41642d12166",
},
},
// Empty data set
{
input: []byte{},
output: map[fs.HashType]string{
fs.HashMD5: "d41d8cd98f00b204e9800998ecf8427e",
fs.HashSHA1: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
},
},
}
func TestMultiHasher(t *testing.T) {
for _, test := range hashTestSet {
mh := fs.NewMultiHasher()
n, err := io.Copy(mh, bytes.NewBuffer(test.input))
if err != nil {
t.Fatal(err)
}
if int(n) != len(test.input) {
t.Fatalf("copy mismatch: %d != %d", n, len(test.input))
}
sums := mh.Sums()
for k, v := range sums {
expect, ok := test.output[k]
if !ok {
t.Errorf("Unknown hash type %v, sum: %q", k, v)
}
if expect != v {
t.Errorf("hash %v mismatch %q != %q", k, v, expect)
}
}
// Test that all are present
for k, v := range test.output {
expect, ok := sums[k]
if !ok {
t.Errorf("did not calculate hash type %v, sum: %q", k, v)
}
if expect != v {
t.Errorf("hash %d mismatch %q != %q", k, v, expect)
}
}
}
}
func TestMultiHasherTypes(t *testing.T) {
h := fs.HashSHA1
for _, test := range hashTestSet {
mh, err := fs.NewMultiHasherTypes(fs.NewHashSet(h))
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(mh, bytes.NewBuffer(test.input))
if err != nil {
t.Fatal(err)
}
if int(n) != len(test.input) {
t.Fatalf("copy mismatch: %d != %d", n, len(test.input))
}
sums := mh.Sums()
if len(sums) != 1 {
t.Fatalf("expected 1 sum, got %d", len(sums))
}
expect := test.output[h]
if expect != sums[h] {
t.Errorf("hash %v mismatch %q != %q", h, sums[h], expect)
}
}
}
func TestHashStream(t *testing.T) {
for _, test := range hashTestSet {
sums, err := fs.HashStream(bytes.NewBuffer(test.input))
if err != nil {
t.Fatal(err)
}
for k, v := range sums {
expect, ok := test.output[k]
if !ok {
t.Errorf("Unknown hash type %v, sum: %q", k, v)
}
if expect != v {
t.Errorf("hash %v mismatch %q != %q", k, v, expect)
}
}
// Test that all are present
for k, v := range test.output {
expect, ok := sums[k]
if !ok {
t.Errorf("did not calculate hash type %v, sum: %q", k, v)
}
if expect != v {
t.Errorf("hash %v mismatch %q != %q", k, v, expect)
}
}
}
}
func TestHashStreamTypes(t *testing.T) {
h := fs.HashSHA1
for _, test := range hashTestSet {
sums, err := fs.HashStreamTypes(bytes.NewBuffer(test.input), fs.NewHashSet(h))
if err != nil {
t.Fatal(err)
}
if len(sums) != 1 {
t.Fatalf("expected 1 sum, got %d", len(sums))
}
expect := test.output[h]
if expect != sums[h] {
t.Errorf("hash %d mismatch %q != %q", h, sums[h], expect)
}
}
}
func TestHashSetStringer(t *testing.T) {
h := fs.NewHashSet(fs.HashSHA1, fs.HashMD5)
s := h.String()
expect := "[MD5, SHA-1]"
if s != expect {
t.Errorf("unexpected stringer: was %q, expected %q", s, expect)
}
h = fs.NewHashSet(fs.HashSHA1)
s = h.String()
expect = "[SHA-1]"
if s != expect {
t.Errorf("unexpected stringer: was %q, expected %q", s, expect)
}
h = fs.NewHashSet()
s = h.String()
expect = "[]"
if s != expect {
t.Errorf("unexpected stringer: was %q, expected %q", s, expect)
}
}
func TestHashStringer(t *testing.T) {
h := fs.HashMD5
s := h.String()
expect := "MD5"
if s != expect {
t.Errorf("unexpected stringer: was %q, expected %q", s, expect)
}
h = fs.HashNone
s = h.String()
expect = "None"
if s != expect {
t.Errorf("unexpected stringer: was %q, expected %q", s, expect)
}
}


@@ -71,12 +71,13 @@ func (f *Limited) NewFsObject(remote string) Object {
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Limited) Put(in io.Reader, remote string, modTime time.Time, size int64) (Object, error) {
func (f *Limited) Put(in io.Reader, src ObjectInfo) (Object, error) {
remote := src.Remote()
obj := f.NewFsObject(remote)
if obj == nil {
return nil, fmt.Errorf("Can't create %q in limited fs", remote)
}
return obj, obj.Update(in, modTime, size)
return obj, obj.Update(in, src)
}
// Mkdir make the directory (container, bucket)
@@ -96,6 +97,11 @@ func (f *Limited) Precision() time.Duration {
return f.fs.Precision()
}
// Hashes returns the supported hash sets.
func (f *Limited) Hashes() HashSet {
return f.fs.Hashes()
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given


@@ -48,10 +48,14 @@ func (t *LoggedTransport) RoundTrip(req *http.Request) (resp *http.Response, err
log.Println(string(buf))
log.Println(separatorReq)
resp, err = t.wrapped.RoundTrip(req)
buf, _ = httputil.DumpResponse(resp, t.logBody)
log.Println(separatorResp)
log.Println("HTTP RESPONSE")
log.Println(string(buf))
if err != nil {
log.Printf("Error: %v\n", err)
} else {
buf, _ = httputil.DumpResponse(resp, t.logBody)
log.Println(string(buf))
}
log.Println(separatorResp)
return resp, err
}

fs/make_test_files.go

@@ -0,0 +1,146 @@
// +build ignore
// Build a directory structure with the required number of files in
//
// Run with go run make_test_files.go [flag] <directory>
package main
import (
cryptrand "crypto/rand"
"flag"
"io"
"log"
"math/rand"
"os"
"path/filepath"
)
var (
// Flags
numberOfFiles = flag.Int("n", 1000, "Number of files to create")
averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory")
maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy")
minFileSize = flag.Int64("min-size", 0, "Minimum size of file to create")
maxFileSize = flag.Int64("max-size", 100, "Maximum size of files to create")
minFileNameLength = flag.Int("min-name-length", 4, "Minimum length of file names to create")
maxFileNameLength = flag.Int("max-name-length", 12, "Maximum length of file names to create")
directoriesToCreate int
totalDirectories int
fileNames = map[string]struct{}{} // keep a note of which file name we've used already
)
// randomString creates a random string for test purposes
func randomString(n int) string {
const (
vowel = "aeiou"
consonant = "bcdfghjklmnpqrstvwxyz"
digit = "0123456789"
)
pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
out := make([]byte, n)
p := 0
for i := range out {
source := pattern[p]
p = (p + 1) % len(pattern)
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// fileName creates a unique random file or directory name
func fileName() (name string) {
for {
length := rand.Intn(*maxFileNameLength-*minFileNameLength) + *minFileNameLength
name = randomString(length)
if _, found := fileNames[name]; !found {
break
}
}
fileNames[name] = struct{}{}
return name
}
// dir is a directory in the directory hierarchy being built up
type dir struct {
name string
depth int
children []*dir
parent *dir
}
// Create a random directory hierarchy under d
func (d *dir) createDirectories() {
for totalDirectories < directoriesToCreate {
newDir := &dir{
name: fileName(),
depth: d.depth + 1,
parent: d,
}
d.children = append(d.children, newDir)
totalDirectories++
switch rand.Intn(4) {
case 0:
if d.depth < *maxDepth {
newDir.createDirectories()
}
case 1:
return
}
}
return
}
// list the directory hierarchy
func (d *dir) list(path string, output []string) []string {
dirPath := path + "/" + d.name
output = append(output, dirPath)
for _, subDir := range d.children {
output = subDir.list(dirPath, output)
}
return output
}
// writeFile writes a random file at dir/name
func writeFile(dir, name string) {
err := os.MkdirAll(dir, 0777)
if err != nil {
log.Fatalf("Failed to make directory %q: %v", dir, err)
}
path := filepath.Join(dir, name)
fd, err := os.Create(path)
if err != nil {
log.Fatalf("Failed to open file %q: %v", path, err)
}
size := rand.Int63n(*maxFileSize-*minFileSize) + *minFileSize
_, err = io.CopyN(fd, cryptrand.Reader, size)
if err != nil {
log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err)
}
err = fd.Close()
if err != nil {
log.Fatalf("Failed to close file %q: %v", path, err)
}
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 1 {
log.Fatalf("Require 1 directory argument")
}
outputDirectory := args[0]
log.Printf("Output dir %q", outputDirectory)
directoriesToCreate = *numberOfFiles / *averageFilesPerDirectory
log.Printf("directoriesToCreate %v", directoriesToCreate)
root := &dir{name: outputDirectory, depth: 1}
for totalDirectories < directoriesToCreate {
root.createDirectories()
}
dirs := root.list("", []string{})
for i := 0; i < *numberOfFiles; i++ {
dir := dirs[rand.Intn(len(dirs))]
writeFile(dir, fileName())
}
}


@@ -5,11 +5,16 @@ package fs
import (
"fmt"
"io"
"log"
"mime"
"path"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"golang.org/x/text/unicode/norm"
)
// CalculateModifyWindow works out modify window for Fses passed in -
@@ -33,48 +38,58 @@ func CalculateModifyWindow(fs ...Fs) {
Debug(fs[0], "Modify window is %s", Config.ModifyWindow)
}
// Md5sumsEqual checks to see if src == dst, but ignores empty strings
func Md5sumsEqual(src, dst string) bool {
// HashEquals checks to see if src == dst, but ignores empty strings
// and returns true if either is empty.
func HashEquals(src, dst string) bool {
if src == "" || dst == "" {
return true
}
return src == dst
}
// CheckMd5sums checks the two files to see if the MD5sums are the same
// CheckHashes checks the two files to see if they have common
// known hash types and compares them
//
// Returns two bools, the first of which is equality and the second of
// which is true if either of the MD5SUMs were unset.
// Returns
//
// May return an error which will already have been logged
// equal - which is equality of the hashes
//
// hash - the HashType. This is HashNone if either of the hashes were
// unset or a compatible hash couldn't be found.
//
// err - may return an error which will already have been logged
//
// If an error is returned it will return equal as false
func CheckMd5sums(src, dst Object) (equal bool, unset bool, err error) {
srcMd5, err := src.Md5sum()
func CheckHashes(src, dst Object) (equal bool, hash HashType, err error) {
common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
// Debug(nil, "Shared hashes: %v", common)
if common.Count() == 0 {
return true, HashNone, nil
}
hash = common.GetOne()
srcHash, err := src.Hash(hash)
if err != nil {
Stats.Error()
ErrorLog(src, "Failed to calculate src md5: %s", err)
return false, false, err
ErrorLog(src, "Failed to calculate src hash: %s", err)
return false, hash, err
}
if srcMd5 == "" {
return true, true, nil
if srcHash == "" {
return true, HashNone, nil
}
dstMd5, err := dst.Md5sum()
dstHash, err := dst.Hash(hash)
if err != nil {
Stats.Error()
ErrorLog(dst, "Failed to calculate dst md5: %s", err)
return false, false, err
ErrorLog(dst, "Failed to calculate dst hash: %s", err)
return false, hash, err
}
if dstMd5 == "" {
return true, true, nil
if dstHash == "" {
return true, HashNone, nil
}
// Debug("Src MD5 %s", srcMd5)
// Debug("Dst MD5 %s", obj.Hash)
return Md5sumsEqual(srcMd5, dstMd5), false, nil
return srcHash == dstHash, hash, nil
}
// Equal checks to see if the src and dst objects are equal by looking at
// size, mtime and MD5SUM
// size, mtime and hash
//
// If the src and dst size are different then it is considered to be
// not equal. If --size-only is in effect then this is the only check
@@ -84,7 +99,7 @@ func CheckMd5sums(src, dst Object) (equal bool, unset bool, err error) {
// considered to be equal. This check is skipped if using --checksum.
//
// If the size is the same and mtime is different, unreadable or
// --checksum is set and the MD5SUM is the same then the file is
// --checksum is set and the hash is the same then the file is
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
//
@@ -120,23 +135,30 @@ func Equal(src, dst Object) bool {
}
// mtime is unreadable or different but size is the same so
// check the MD5SUM
same, md5unset, _ := CheckMd5sums(src, dst)
// check the hash
same, hash, _ := CheckHashes(src, dst)
if !same {
Debug(src, "Md5sums differ")
Debug(src, "Hash differ")
return false
}
if !Config.CheckSum {
// Size and MD5 the same but mtime different so update the
// Size and hash the same but mtime different so update the
// mtime of the dst object here
dst.SetModTime(srcModTime)
err := dst.SetModTime(srcModTime)
if err == ErrorCantSetModTime {
Debug(src, "src and dst identical but can't set mod time without re-uploading")
return false
} else if err != nil {
Stats.Error()
ErrorLog(dst, "Failed to read set modification time: %s", err)
}
}
if md5unset {
if hash == HashNone {
Debug(src, "Size of src and dst objects identical")
} else {
Debug(src, "Size and MD5SUM of src and dst objects identical")
Debug(src, "Size and %v of src and dst objects identical", hash)
}
return true
}
@@ -144,7 +166,7 @@ func Equal(src, dst Object) bool {
// MimeType returns a guess at the mime type from the extension
func MimeType(o Object) string {
mimeType := mime.TypeByExtension(path.Ext(o.Remote()))
if mimeType == "" {
if !strings.ContainsRune(mimeType, '/') {
mimeType = "application/octet-stream"
}
return mimeType
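A short sketch of why the check is on the '/' rune rather than the empty string: platform mime tables can return malformed entries, and any valid media type must contain a slash. guessType is an illustrative stand-in for MimeType:

package main

import (
    "fmt"
    "mime"
    "path"
    "strings"
)

func guessType(remote string) string {
    t := mime.TypeByExtension(path.Ext(remote))
    // A well-formed media type is always "type/subtype"; anything
    // else (empty string or a corrupt OS mime table entry) falls
    // back to the safe default.
    if !strings.ContainsRune(t, '/') {
        t = "application/octet-stream"
    }
    return t
}

func main() {
    fmt.Println(guessType("photo.jpg")) // image/jpeg on typical systems
    fmt.Println(guessType("data.blob")) // application/octet-stream
}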
@@ -172,7 +194,7 @@ func removeFailedCopy(dst Object) bool {
// call Copy() with dst nil on a pre-existing file then some filing
// systems (eg Drive) may duplicate the file.
func Copy(f Fs, dst, src Object) {
const maxTries = 10
maxTries := Config.LowLevelRetries
tries := 0
doUpdate := dst != nil
var err, inErr error
@@ -208,17 +230,17 @@ tryAgain:
if doUpdate {
actionTaken = "Copied (updated existing)"
err = dst.Update(in, src.ModTime(), src.Size())
err = dst.Update(in, src)
} else {
actionTaken = "Copied (new)"
dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
dst, err = f.Put(in, src)
}
inErr = in.Close()
}
// Retry if err returned a retry error
if r, ok := err.(Retry); ok && r.Retry() && tries < maxTries {
tries++
Log(src, "Received error: %v - retrying %d/%d", err, tries, maxTries)
Log(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
if removeFailedCopy(dst) {
// If we removed dst, then nil it out and note we are not updating
dst = nil
@@ -245,20 +267,27 @@ tryAgain:
return
}
// Verify md5sums are the same after transfer - ignoring blank md5sums
if !Config.SizeOnly {
srcMd5sum, md5sumErr := src.Md5sum()
if md5sumErr != nil {
// Verify hashes are the same after transfer - ignoring blank hashes
// TODO(klauspost): This could be extended, so we always create a hash type matching
// the destination, and calculate it while sending.
common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
// Debug(src, "common hashes: %v", common)
if !Config.SizeOnly && common.Count() > 0 {
// Get common hash type
hashType := common.GetOne()
srcSum, err := src.Hash(hashType)
if err != nil {
Stats.Error()
ErrorLog(src, "Failed to read md5sum: %s", md5sumErr)
} else if srcMd5sum != "" {
dstMd5sum, md5sumErr := dst.Md5sum()
if md5sumErr != nil {
ErrorLog(src, "Failed to read src hash: %s", err)
} else if srcSum != "" {
dstSum, err := dst.Hash(hashType)
if err != nil {
Stats.Error()
ErrorLog(dst, "Failed to read md5sum: %s", md5sumErr)
} else if !Md5sumsEqual(srcMd5sum, dstMd5sum) {
ErrorLog(dst, "Failed to read hash: %s", err)
} else if !HashEquals(srcSum, dstSum) {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
err = fmt.Errorf("Corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
ErrorLog(dst, "%s", err)
removeFailedCopy(dst)
return
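// Illustrative sketch - not rclone source - of how a bitmask hash set can
// pick a hash type two backends share, in the spirit of the
// Hashes().Overlap(...) and GetOne() calls above. HashSet, HashMD5 and
// HashSHA1 here are assumptions made up for the example.
package main

import "fmt"

type HashSet uint

const (
	HashMD5 HashSet = 1 << iota
	HashSHA1
)

// Overlap returns the intersection of the two sets.
func (s HashSet) Overlap(o HashSet) HashSet { return s & o }

// Count returns the number of hash types in the set.
func (s HashSet) Count() int {
	n := 0
	for ; s != 0; s &= s - 1 {
		n++
	}
	return n
}

// GetOne returns an arbitrary member of the set (the lowest set bit).
func (s HashSet) GetOne() HashSet { return s & -s }

func main() {
	src := HashMD5 | HashSHA1 // e.g. local disk, which can do both
	dst := HashSHA1           // e.g. a SHA1-only backend
	if common := src.Overlap(dst); common.Count() > 0 {
		fmt.Printf("verify the transfer with hash type %b\n", common.GetOne())
	}
}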
@@ -281,17 +310,56 @@ func checkOne(pair ObjectPair, out ObjectPairChan) {
if !src.Storable() {
return
}
// Check to see if changed or not
if Equal(src, dst) {
Debug(src, "Unchanged skipping")
// If we should ignore existing files, don't transfer
if Config.IgnoreExisting {
Debug(src, "Destination exists, skipping")
return
}
// If we should upload unconditionally
if Config.IgnoreTimes {
Debug(src, "Uploading unconditionally as --ignore-times is in use")
out <- pair
return
}
// If UpdateOlder is in effect, skip if dst is newer than src
if Config.UpdateOlder {
srcModTime := src.ModTime()
dstModTime := dst.ModTime()
dt := dstModTime.Sub(srcModTime)
// If have a mutually agreed precision then use that
modifyWindow := Config.ModifyWindow
if modifyWindow == ModTimeNotSupported {
// Otherwise use 1 second as a safe default as
// the resolution of the time a file was
// uploaded.
modifyWindow = time.Second
}
switch {
case dt >= modifyWindow:
Debug(src, "Destination is newer than source, skipping")
return
case dt <= -modifyWindow:
Debug(src, "Destination is older than source, transferring")
default:
if src.Size() == dst.Size() {
Debug(src, "Destination mod time is within %v of source and sizes identical, skipping", modifyWindow)
return
}
Debug(src, "Destination mod time is within %v of source but sizes differ, transferring", modifyWindow)
}
} else {
// Check to see if changed or not
if Equal(src, dst) {
Debug(src, "Unchanged skipping")
return
}
}
out <- pair
}
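// Illustrative sketch - not rclone source - of the three-way --update
// classification above: a destination inside the modify window is treated
// as "same time" and decided on size alone.
package main

import (
	"fmt"
	"time"
)

func classify(srcMod, dstMod time.Time, window time.Duration) string {
	switch dt := dstMod.Sub(srcMod); {
	case dt >= window:
		return "destination newer: skip"
	case dt <= -window:
		return "destination older: transfer"
	default:
		return "within window: compare sizes"
	}
}

func main() {
	src := time.Now()
	fmt.Println(classify(src, src.Add(2*time.Second), time.Second))
	fmt.Println(classify(src, src.Add(-2*time.Second), time.Second))
	fmt.Println(classify(src, src.Add(time.Millisecond), time.Second))
}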
// PairChecker reads ObjectPairs on in and sends them to out if they need transferring.
//
// FIXME potentially doing lots of MD5SUMS at once
// FIXME potentially doing lots of hashes at once
func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
defer wg.Done()
for pair := range in {
@@ -309,7 +377,7 @@ func PairCopier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
src := pair.src
Stats.Transferring(src)
if Config.DryRun {
Debug(src, "Not copying as --dry-run")
Log(src, "Not copying as --dry-run")
} else {
Copy(fdst, pair.dst, src)
}
@@ -328,8 +396,8 @@ func PairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
dst := pair.dst
Stats.Transferring(src)
if Config.DryRun {
Debug(src, "Not moving as --dry-run")
} else if haveMover {
Log(src, "Not moving as --dry-run")
} else if haveMover && src.Fs().Name() == fdst.Name() {
// Delete destination if it exists
if pair.dst != nil {
err := dst.Remove()
@@ -352,6 +420,23 @@ func PairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
}
}
// DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors.
func DeleteFile(dst Object) {
if Config.DryRun {
Log(dst, "Not deleting as --dry-run")
} else {
Stats.Checking(dst)
err := dst.Remove()
Stats.DoneChecking(dst)
if err != nil {
Stats.Error()
ErrorLog(dst, "Couldn't delete: %s", err)
} else {
Debug(dst, "Deleted")
}
}
}
// DeleteFiles removes all the files passed in the channel
func DeleteFiles(toBeDeleted ObjectsChan) {
var wg sync.WaitGroup
@@ -360,19 +445,7 @@ func DeleteFiles(toBeDeleted ObjectsChan) {
go func() {
defer wg.Done()
for dst := range toBeDeleted {
if Config.DryRun {
Debug(dst, "Not deleting as --dry-run")
} else {
Stats.Checking(dst)
err := dst.Remove()
Stats.DoneChecking(dst)
if err != nil {
Stats.Error()
ErrorLog(dst, "Couldn't delete: %s", err)
} else {
Debug(dst, "Deleted")
}
}
DeleteFile(dst)
}
}()
}
@@ -380,22 +453,29 @@ func DeleteFiles(toBeDeleted ObjectsChan) {
wg.Wait()
}
// Read a map of Object.Remote to Object for the given Fs
func readFilesMap(fs Fs, obeyInclude bool) map[string]Object {
// Read a map of Object.Remote to Object for the given Fs.
// If includeAll is specified all files will be added,
// otherwise only files passing the filter will be added.
func readFilesMap(fs Fs, includeAll bool) map[string]Object {
files := make(map[string]Object)
normalised := make(map[string]struct{})
for o := range fs.List() {
remote := o.Remote()
normalisedRemote := strings.ToLower(norm.NFC.String(remote))
if _, ok := files[remote]; !ok {
// Make sure we don't delete excluded files if not required
if !obeyInclude || Config.Filter.DeleteExcluded || Config.Filter.Include(remote, o.Size()) {
if includeAll || Config.Filter.IncludeObject(o) {
files[remote] = o
if _, ok := normalised[normalisedRemote]; ok {
Log(o, "Warning: File found with same name but different case on %v", o.Fs())
}
} else {
Debug(o, "Excluded from sync (and deletion)")
}
} else {
Log(o, "Duplicate file detected")
}
normalised[normalisedRemote] = struct{}{}
}
return files
}
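// Illustrative sketch - not rclone source - of the duplicate-name warning
// above: two file names can differ only by case or by Unicode normalisation
// form, so names are folded with NFC plus ToLower before comparing.
// Requires golang.org/x/text/unicode/norm.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/unicode/norm"
)

func main() {
	seen := make(map[string]string)
	// "cafe\u0301" is "café" in decomposed (NFD) form.
	for _, name := range []string{"Café", "cafe\u0301", "other.txt"} {
		key := strings.ToLower(norm.NFC.String(name))
		if prev, ok := seen[key]; ok {
			fmt.Printf("%q collides with %q after normalisation\n", name, prev)
			continue
		}
		seen[key] = name
	}
}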
@@ -416,17 +496,87 @@ func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
return nil
}
err := fdst.Mkdir()
err := Mkdir(fdst)
if err != nil {
Stats.Error()
return err
}
Log(fdst, "Building file list")
// Read the destination files first
// FIXME could do this in parallel and make it use less memory
delFiles := readFilesMap(fdst, true)
// Read the files of both source and destination
var listWg sync.WaitGroup
listWg.Add(2)
var dstFiles map[string]Object
var srcFiles map[string]Object
var srcObjects = make(ObjectsChan, Config.Transfers)
// Read dst files including excluded files if DeleteExcluded is set
go func() {
dstFiles = readFilesMap(fdst, Config.Filter.DeleteExcluded)
listWg.Done()
}()
// Read src file not including excluded files
go func() {
srcFiles = readFilesMap(fsrc, false)
listWg.Done()
for _, v := range srcFiles {
srcObjects <- v
}
close(srcObjects)
}()
startDeletion := make(chan struct{})
// Delete files if asked
var delWg sync.WaitGroup
delWg.Add(1)
go func() {
if !Delete {
return
}
defer func() {
Debug(fdst, "Deletion finished")
delWg.Done()
}()
<-startDeletion
Debug(fdst, "Starting deletion")
if Stats.Errored() {
ErrorLog(fdst, "Not deleting files as there were IO errors")
return
}
// Delete the spare files
toDelete := make(ObjectsChan, Config.Transfers)
go func() {
for key, fs := range dstFiles {
_, exists := srcFiles[key]
if !exists {
toDelete <- fs
}
}
close(toDelete)
}()
DeleteFiles(toDelete)
}()
// Wait for all files to be read
listWg.Wait()
// Start deleting, unless we must delete after transfer
if Delete && !Config.DeleteAfter {
close(startDeletion)
}
// If deletes must finish before starting transfers, we must wait now.
if Delete && Config.DeleteBefore {
Log(fdst, "Waiting for deletes to finish (before)")
delWg.Wait()
}
// Read source files checking them off against dest files
toBeChecked := make(ObjectPairChan, Config.Transfers)
@@ -449,18 +599,13 @@ func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
}
go func() {
for src := range fsrc.List() {
for src := range srcObjects {
remote := src.Remote()
if !Config.Filter.Include(remote, src.Size()) {
Debug(src, "Excluding from sync")
if dst, dstFound := dstFiles[remote]; dstFound {
toBeChecked <- ObjectPair{src, dst}
} else {
if dst, dstFound := delFiles[remote]; dstFound {
delete(delFiles, remote)
toBeChecked <- ObjectPair{src, dst}
} else {
// No need to check since doesn't exist
toBeUploaded <- ObjectPair{src, nil}
}
// No need to check since doesn't exist
toBeUploaded <- ObjectPair{src, nil}
}
}
close(toBeChecked)
@@ -472,23 +617,16 @@ func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
Log(fdst, "Waiting for transfers to finish")
copierWg.Wait()
// Delete files if asked
if Delete {
if Stats.Errored() {
ErrorLog(fdst, "Not deleting files as there were IO errors")
return nil
}
// Delete the spare files
toDelete := make(ObjectsChan, Config.Transfers)
go func() {
for _, fs := range delFiles {
toDelete <- fs
}
close(toDelete)
}()
DeleteFiles(toDelete)
// If deleting after, start deletion now
if Delete && Config.DeleteAfter {
close(startDeletion)
}
// Unless we have already waited, wait for deletion to finish.
if Delete && !Config.DeleteBefore {
Log(fdst, "Waiting for deletes to finish (during+after)")
delWg.Wait()
}
return nil
}
@@ -509,8 +647,8 @@ func MoveDir(fdst, fsrc Fs) error {
return nil
}
// First attempt to use DirMover
if fdstDirMover, ok := fdst.(DirMover); ok && fsrc.Name() == fdst.Name() {
// First attempt to use DirMover if exists, same Fs and no filters are active
if fdstDirMover, ok := fdst.(DirMover); ok && fsrc.Name() == fdst.Name() && Config.Filter.InActive() {
err := fdstDirMover.DirMove(fsrc)
Debug(fdst, "Using server side directory move")
switch err {
@@ -532,20 +670,76 @@ func MoveDir(fdst, fsrc Fs) error {
ErrorLog(fdst, "Not deleting files as there were IO errors")
return err
}
return Purge(fsrc)
// If no filters then purge
if Config.Filter.InActive() {
return Purge(fsrc)
}
// Otherwise remove any remaining files obeying filters
err = Delete(fsrc)
if err != nil {
return err
}
// and try to remove the directory if empty - ignoring error
_ = TryRmdir(fsrc)
return nil
}
// Check the files in fsrc and fdst according to Size and MD5SUM
// checkIdentical checks to see if dst and src are identical
//
// it returns true if differences were found
func checkIdentical(dst, src Object) bool {
Stats.Checking(src)
defer Stats.DoneChecking(src)
if src.Size() != dst.Size() {
Stats.Error()
ErrorLog(src, "Sizes differ")
return true
}
if !Config.SizeOnly {
same, _, err := CheckHashes(src, dst)
if err != nil {
// CheckHashes will log and count errors
return true
}
if !same {
Stats.Error()
ErrorLog(src, "Md5sums differ")
return true
}
}
Debug(src, "OK")
return false
}
// Check the files in fsrc and fdst according to Size and hash
func Check(fdst, fsrc Fs) error {
Log(fdst, "Building file list")
differences := int32(0)
var (
wg sync.WaitGroup
dstFiles, srcFiles map[string]Object
)
// Read the destination files first
// FIXME could do this in parallel and make it use less memory
dstFiles := readFilesMap(fdst, false)
wg.Add(2)
go func() {
defer wg.Done()
// Read the destination files
Log(fdst, "Building file list")
dstFiles = readFilesMap(fdst, false)
Debug(fdst, "Done building file list")
}()
// Read the source files checking them against dstFiles
// FIXME could do this in parallel and make it use less memory
srcFiles := readFilesMap(fsrc, false)
go func() {
defer wg.Done()
// Read the source files
Log(fsrc, "Building file list")
srcFiles = readFilesMap(fsrc, false)
Debug(fdst, "Done building file list")
}()
wg.Wait()
// FIXME could do this as it goes along and make it use less
// memory.
// Move all the common files into commonFiles and delete them
// from srcFiles and dstFiles
@@ -562,12 +756,14 @@ func Check(fdst, fsrc Fs) error {
for _, dst := range dstFiles {
Stats.Error()
ErrorLog(dst, "File not in %v", fsrc)
atomic.AddInt32(&differences, 1)
}
Log(fsrc, "%d files not in %s", len(srcFiles), fdst)
for _, src := range srcFiles {
Stats.Error()
ErrorLog(src, "File not in %v", fdst)
atomic.AddInt32(&differences, 1)
}
checks := make(chan []Object, Config.Transfers)
@@ -584,24 +780,9 @@ func Check(fdst, fsrc Fs) error {
go func() {
defer checkerWg.Done()
for check := range checks {
dst, src := check[0], check[1]
Stats.Checking(src)
if src.Size() != dst.Size() {
Stats.DoneChecking(src)
Stats.Error()
ErrorLog(src, "Sizes differ")
continue
if checkIdentical(check[0], check[1]) {
atomic.AddInt32(&differences, 1)
}
same, _, err := CheckMd5sums(src, dst)
Stats.DoneChecking(src)
if err != nil {
continue
}
if !same {
Stats.Error()
ErrorLog(src, "Md5sums differ")
}
Debug(src, "OK")
}
}()
}
@@ -609,8 +790,8 @@ func Check(fdst, fsrc Fs) error {
Log(fdst, "Waiting for checks to finish")
checkerWg.Wait()
Log(fdst, "%d differences found", Stats.GetErrors())
if Stats.GetErrors() > 0 {
return fmt.Errorf("%d differences found", Stats.GetErrors())
if differences > 0 {
return fmt.Errorf("%d differences found", differences)
}
return nil
}
@@ -626,7 +807,9 @@ func ListFn(f Fs, fn func(Object)) error {
go func() {
defer wg.Done()
for o := range in {
fn(o)
if Config.Filter.IncludeObject(o) {
fn(o)
}
}
}()
}
@@ -648,7 +831,7 @@ func syncFprintf(w io.Writer, format string, a ...interface{}) {
// List the Fs to the supplied writer
//
// Shows size and path
// Shows size and path - obeys includes and excludes
//
// Lists in parallel which may get them out of order
func List(f Fs, w io.Writer) error {
@@ -659,7 +842,7 @@ func List(f Fs, w io.Writer) error {
// ListLong lists the Fs to the supplied writer
//
// Shows size, mod time and path
// Shows size, mod time and path - obeys includes and excludes
//
// Lists in parallel which may get them out of order
func ListLong(f Fs, w io.Writer) error {
@@ -673,23 +856,41 @@ func ListLong(f Fs, w io.Writer) error {
// Md5sum list the Fs to the supplied writer
//
// Produces the same output as the md5sum command
// Produces the same output as the md5sum command - obeys includes and
// excludes
//
// Lists in parallel which may get them out of order
func Md5sum(f Fs, w io.Writer) error {
return hashLister(HashMD5, f, w)
}
// Sha1sum list the Fs to the supplied writer
//
// Obeys includes and excludes
//
// Lists in parallel which may get them out of order
func Sha1sum(f Fs, w io.Writer) error {
return hashLister(HashSHA1, f, w)
}
func hashLister(ht HashType, f Fs, w io.Writer) error {
return ListFn(f, func(o Object) {
Stats.Checking(o)
md5sum, err := o.Md5sum()
sum, err := o.Hash(ht)
Stats.DoneChecking(o)
if err != nil {
Debug(o, "Failed to read MD5: %v", err)
md5sum = "ERROR"
if err == ErrHashUnsupported {
sum = "UNSUPPORTED"
} else if err != nil {
Debug(o, "Failed to read %v: %v", ht, err)
sum = "ERROR"
}
syncFprintf(w, "%32s %s\n", md5sum, o.Remote())
syncFprintf(w, "%*s %s\n", HashWidth[ht], sum, o.Remote())
})
}
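// Illustrative sketch - not rclone source - of the "%*s" verb hashLister
// uses above: fmt reads the field width from the argument list, so one
// format string can right-align MD5 (32 hex chars) and SHA1 (40 hex chars)
// sums alike. The sums below are made-up example values.
package main

import "fmt"

func main() {
	fmt.Printf("%*s %s\n", 40, "3f786850e387550fdab836ed7e6dc881de23001b", "a.txt")
	fmt.Printf("%*s %s\n", 40, "UNSUPPORTED", "b.txt")
}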
// Count counts the objects and their sizes in the Fs
//
// Obeys includes and excludes
func Count(f Fs) (objects int64, size int64, err error) {
err = ListFn(f, func(o Object) {
atomic.AddInt64(&objects, 1)
@@ -708,6 +909,10 @@ func ListDir(f Fs, w io.Writer) error {
// Mkdir makes a destination directory or container
func Mkdir(f Fs) error {
if Config.DryRun {
Log(f, "Not making directory as dry run is set")
return nil
}
err := f.Mkdir()
if err != nil {
Stats.Error()
@@ -716,18 +921,24 @@ func Mkdir(f Fs) error {
return nil
}
// Rmdir removes a container but not if not empty
func Rmdir(f Fs) error {
// TryRmdir removes a container but not if not empty. It doesn't
// count errors but may return one.
func TryRmdir(f Fs) error {
if Config.DryRun {
Log(f, "Not deleting as dry run is set")
} else {
err := f.Rmdir()
if err != nil {
Stats.Error()
return err
}
return nil
}
return nil
return f.Rmdir()
}
// Rmdir removes a container but not if not empty
func Rmdir(f Fs) error {
err := TryRmdir(f)
if err != nil {
Stats.Error()
return err
}
return err
}
// Purge removes a container and all of its contents
@@ -739,7 +950,7 @@ func Purge(f Fs) error {
if purger, ok := f.(Purger); ok {
doFallbackPurge = false
if Config.DryRun {
Debug(f, "Not purging as --dry-run set")
Log(f, "Not purging as --dry-run set")
} else {
err = purger.Purge()
if err == ErrorCantPurge {
@@ -758,3 +969,183 @@ func Purge(f Fs) error {
}
return nil
}
// Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes.
func Delete(f Fs) error {
wg := new(sync.WaitGroup)
delete := make(ObjectsChan, Config.Transfers)
wg.Add(1)
go func() {
defer wg.Done()
DeleteFiles(delete)
}()
err := ListFn(f, func(o Object) {
delete <- o
})
close(delete)
wg.Wait()
return err
}
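// Illustrative sketch - not rclone source - of the producer/consumer shape
// Delete uses above: the lister feeds a buffered channel, a goroutine
// drains it, and close() plus a WaitGroup shut the pipeline down cleanly.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	work := make(chan string, 4)
	wg.Add(1)
	go func() {
		defer wg.Done()
		for name := range work {
			fmt.Println("deleting", name)
		}
	}()
	for _, name := range []string{"a.txt", "b.txt"} {
		work <- name
	}
	close(work) // no more work: lets the range loop finish
	wg.Wait()
}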
// dedupeRename renames the objs slice to different names
func dedupeRename(remote string, objs []Object) {
f := objs[0].Fs()
mover, ok := f.(Mover)
if !ok {
log.Fatalf("Fs %v doesn't support Move", f)
}
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
for i, o := range objs {
newName := fmt.Sprintf("%s-%d%s", base, i+1, ext)
if !Config.DryRun {
newObj, err := mover.Move(o, newName)
if err != nil {
Stats.Error()
ErrorLog(o, "Failed to rename: %v", err)
continue
}
Log(newObj, "renamed from: %v", o)
} else {
Log(remote, "Not renaming to %q as --dry-run", newName)
}
}
}
// dedupeDeleteAllButOne deletes all but the one in keep
func dedupeDeleteAllButOne(keep int, remote string, objs []Object) {
for i, o := range objs {
if i == keep {
continue
}
DeleteFile(o)
}
Log(remote, "Deleted %d extra copies", len(objs)-1)
}
// dedupeDeleteIdentical deletes all but one of identical (by hash) copies
func dedupeDeleteIdentical(remote string, objs []Object) []Object {
// See how many of these duplicates are identical
byHash := make(map[string][]Object, len(objs))
for _, o := range objs {
md5sum, err := o.Hash(HashMD5)
if err == nil {
byHash[md5sum] = append(byHash[md5sum], o)
}
}
// Delete identical duplicates, refilling obj with the ones remaining
objs = nil
for md5sum, hashObjs := range byHash {
if len(hashObjs) > 1 {
Log(remote, "Deleting %d/%d identical duplicates (md5sum %q)", len(hashObjs)-1, len(hashObjs), md5sum)
for _, o := range hashObjs[1:] {
DeleteFile(o)
}
}
objs = append(objs, hashObjs[0])
}
return objs
}
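// Illustrative sketch - not rclone source - of the grouping step
// dedupeDeleteIdentical performs above: bucket names by content hash, keep
// one representative per bucket, and everything else in a bucket of two or
// more is deletable. The sums are made-up example values.
package main

import "fmt"

func main() {
	sums := map[string]string{ // name -> md5sum
		"a.jpg":     "d41d8cd9",
		"a (1).jpg": "d41d8cd9",
		"a (2).jpg": "ffffffff",
	}
	byHash := make(map[string][]string)
	for name, sum := range sums {
		byHash[sum] = append(byHash[sum], name)
	}
	for sum, names := range byHash {
		if len(names) > 1 {
			fmt.Printf("md5sum %s: keep %q, %d identical copies deletable\n",
				sum, names[0], len(names)-1)
		}
	}
}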
// dedupeInteractive interactively dedupes the slice of objects
func dedupeInteractive(remote string, objs []Object) {
fmt.Printf("%s: %d duplicates remain\n", remote, len(objs))
for i, o := range objs {
md5sum, err := o.Hash(HashMD5)
if err != nil {
md5sum = err.Error()
}
fmt.Printf(" %d: %12d bytes, %s, md5sum %32s\n", i+1, o.Size(), o.ModTime().Format("2006-01-02 15:04:05.000000000"), md5sum)
}
switch Command([]string{"sSkip and do nothing", "kKeep just one (choose which in next step)", "rRename all to be different (by changing file.jpg to file-1.jpg)"}) {
case 's':
case 'k':
keep := ChooseNumber("Enter the number of the file to keep", 1, len(objs))
dedupeDeleteAllButOne(keep-1, remote, objs)
case 'r':
dedupeRename(remote, objs)
}
}
type objectsSortedByModTime []Object
func (objs objectsSortedByModTime) Len() int { return len(objs) }
func (objs objectsSortedByModTime) Swap(i, j int) { objs[i], objs[j] = objs[j], objs[i] }
func (objs objectsSortedByModTime) Less(i, j int) bool {
return objs[i].ModTime().Before(objs[j].ModTime())
}
// DeduplicateMode is how the dedupe command chooses what to do
type DeduplicateMode int
// Deduplicate modes
const (
DeduplicateInteractive DeduplicateMode = iota // interactively ask the user
DeduplicateSkip // skip all conflicts
DeduplicateFirst // choose the first object
DeduplicateNewest // choose the newest object
DeduplicateOldest // choose the oldest object
DeduplicateRename // rename the objects
)
func (mode DeduplicateMode) String() string {
switch mode {
case DeduplicateInteractive:
return "interactive"
case DeduplicateSkip:
return "skip"
case DeduplicateFirst:
return "first"
case DeduplicateNewest:
return "newest"
case DeduplicateOldest:
return "oldest"
case DeduplicateRename:
return "rename"
}
return "unknown"
}
// Deduplicate interactively finds duplicate files and offers to
// delete all but one or rename them to be different. Only useful with
// Google Drive which can have duplicate file names.
func Deduplicate(f Fs, mode DeduplicateMode) error {
Log(f, "Looking for duplicates using %v mode.", mode)
files := map[string][]Object{}
for o := range f.List() {
remote := o.Remote()
files[remote] = append(files[remote], o)
}
for remote, objs := range files {
if len(objs) > 1 {
Log(remote, "Found %d duplicates - deleting identical copies", len(objs))
objs = dedupeDeleteIdentical(remote, objs)
if len(objs) <= 1 {
Log(remote, "All duplicates removed")
continue
}
switch mode {
case DeduplicateInteractive:
dedupeInteractive(remote, objs)
case DeduplicateFirst:
dedupeDeleteAllButOne(0, remote, objs)
case DeduplicateNewest:
sort.Sort(objectsSortedByModTime(objs)) // sort oldest first
dedupeDeleteAllButOne(len(objs)-1, remote, objs)
case DeduplicateOldest:
sort.Sort(objectsSortedByModTime(objs)) // sort oldest first
dedupeDeleteAllButOne(0, remote, objs)
case DeduplicateRename:
dedupeRename(remote, objs)
case DeduplicateSkip:
// skip
default:
// skip
}
}
}
return nil
}
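// Illustrative sketch - not rclone source - of how DeduplicateNewest above
// reduces to an oldest-first sort plus "keep the last index", using the
// same sort.Interface pattern as objectsSortedByModTime.
package main

import (
	"fmt"
	"sort"
	"time"
)

type ascending []time.Time

func (t ascending) Len() int           { return len(t) }
func (t ascending) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }
func (t ascending) Less(i, j int) bool { return t[i].Before(t[j]) }

func main() {
	mods := ascending{
		time.Date(2016, 3, 1, 0, 0, 0, 0, time.UTC),
		time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC),
		time.Date(2016, 2, 1, 0, 0, 0, 0, time.UTC),
	}
	sort.Sort(mods) // oldest first
	fmt.Println("newest copy, kept:", mods[len(mods)-1])
}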

File diff suppressed because it is too large

fs/test_all.go

@@ -0,0 +1,222 @@
// +build ignore
// Run tests for all the remotes
//
// Run with go run test_all.go
package main
import (
"flag"
"log"
"os"
"os/exec"
"runtime"
"strings"
"time"
"github.com/ncw/rclone/fs"
_ "github.com/ncw/rclone/fs/all" // import all fs
"github.com/ncw/rclone/fstest"
)
var (
remotes = []string{
"TestSwift:",
"TestS3:",
"TestDrive:",
"TestGoogleCloudStorage:",
"TestDropbox:",
"TestAmazonCloudDrive:",
"TestOneDrive:",
"TestHubic:",
"TestB2:",
"TestYandex:",
}
binary = "fs.test"
// Flags
maxTries = flag.Int("maxtries", 3, "Number of times to try each test")
runTests = flag.String("run", "", "Comma separated list of remotes to test, eg 'TestSwift:,TestS3'")
verbose = flag.Bool("verbose", false, "Run the tests with -v")
clean = flag.Bool("clean", false, "Instead of testing, clean all left over test directories")
runOnly = flag.String("run-only", "", "Run only those tests matching the regexp supplied")
)
// test holds info about a running test
type test struct {
remote string
subdir bool
cmdLine []string
cmdString string
try int
err error
output []byte
}
// newTest creates a new test
func newTest(remote string, subdir bool) *test {
t := &test{
remote: remote,
subdir: subdir,
cmdLine: []string{"./" + binary, "-remote", remote},
try: 1,
}
if *verbose {
t.cmdLine = append(t.cmdLine, "-test.v")
}
if *runOnly != "" {
t.cmdLine = append(t.cmdLine, "-test.run", *runOnly)
}
if subdir {
t.cmdLine = append(t.cmdLine, "-subdir")
}
t.cmdString = strings.Join(t.cmdLine, " ")
return t
}
// trial runs a single test
func (t *test) trial() {
log.Printf("%q - Starting (try %d/%d)", t.cmdString, t.try, *maxTries)
cmd := exec.Command(t.cmdLine[0], t.cmdLine[1:]...)
start := time.Now()
t.output, t.err = cmd.CombinedOutput()
duration := time.Since(start)
if t.passed() {
log.Printf("%q - Finished OK in %v (try %d/%d)", t.cmdString, duration, t.try, *maxTries)
} else {
log.Printf("%q - Finished ERROR in %v (try %d/%d): %v", t.cmdString, duration, t.try, *maxTries, t.err)
}
}
// cleanFs runs a single clean fs for left over directories
func (t *test) cleanFs() error {
f, err := fs.NewFs(t.remote)
if err != nil {
return err
}
for dir := range f.ListDir() {
if fstest.MatchTestRemote.MatchString(dir.Name) {
log.Printf("Purging %s%s", t.remote, dir.Name)
dir, err := fs.NewFs(t.remote + dir.Name)
if err != nil {
return err
}
err = fs.Purge(dir)
if err != nil {
return err
}
}
}
return nil
}
// clean runs a single clean on a fs for left over directories
func (t *test) clean() {
log.Printf("%q - Starting clean (try %d/%d)", t.remote, t.try, *maxTries)
start := time.Now()
t.err = t.cleanFs()
if t.err != nil {
log.Printf("%q - Failed to purge %v", t.remote, t.err)
}
duration := time.Since(start)
if t.passed() {
log.Printf("%q - Finished OK in %v (try %d/%d)", t.cmdString, duration, t.try, *maxTries)
} else {
log.Printf("%q - Finished ERROR in %v (try %d/%d): %v", t.cmdString, duration, t.try, *maxTries, t.err)
}
}
// passed returns true if the test passed
func (t *test) passed() bool {
return t.err == nil
}
// run runs all the trials for this test
func (t *test) run(result chan<- *test) {
for t.try = 1; t.try <= *maxTries; t.try++ {
if *clean {
if !t.subdir {
t.clean()
}
} else {
t.trial()
}
if t.passed() {
break
}
}
if !t.passed() {
log.Println("------------------------------------------------------------")
log.Println(string(t.output))
log.Println("------------------------------------------------------------")
}
result <- t
}
// makeTestBinary makes the binary we will run
func makeTestBinary() {
if runtime.GOOS == "windows" {
binary += ".exe"
}
log.Printf("Making test binary %q", binary)
err := exec.Command("go", "test", "-c", "-o", binary).Run()
if err != nil {
log.Fatalf("Failed to make test binary: %v", err)
}
if _, err := os.Stat(binary); err != nil {
log.Fatalf("Couldn't find test binary %q", binary)
}
}
// removeTestBinary removes the binary made in makeTestBinary
func removeTestBinary() {
err := os.Remove(binary) // Delete the binary when finished
if err != nil {
log.Printf("Error removing test binary %q: %v", binary, err)
}
}
func main() {
flag.Parse()
if *runTests != "" {
remotes = strings.Split(*runTests, ",")
}
log.Printf("Testing remotes: %s", strings.Join(remotes, ", "))
start := time.Now()
if *clean {
fs.LoadConfig()
} else {
makeTestBinary()
defer removeTestBinary()
}
// start the tests
results := make(chan *test, 8)
awaiting := 0
for _, remote := range remotes {
awaiting += 2
go newTest(remote, false).run(results)
go newTest(remote, true).run(results)
}
// Wait for the tests to finish
var failed []*test
for ; awaiting > 0; awaiting-- {
t := <-results
if !t.passed() {
failed = append(failed, t)
}
}
duration := time.Since(start)
// Summarise results
if len(failed) == 0 {
log.Printf("PASS: All tests finished OK in %v", duration)
} else {
log.Printf("FAIL: %d tests failed in %v", len(failed), duration)
for _, t := range failed {
log.Printf(" * %s", t.cmdString)
}
os.Exit(1)
}
}


@@ -1,32 +0,0 @@
#!/bin/bash
go install
REMOTES="
TestSwift:
TestS3:
TestDrive:
TestGoogleCloudStorage:
TestDropbox:
TestAmazonCloudDrive:
TestOneDrive:
TestHubic:
"
function test_remote {
args=$@
echo "@go test $args"
go test $args || {
echo "*** test $args FAILED ***"
exit 1
}
}
test_remote
test_remote --subdir
for remote in $REMOTES; do
test_remote --remote $remote
test_remote --remote $remote --subdir
done
echo "All OK"

fs/testdata/enc-invalid.conf

@@ -0,0 +1,4 @@
# Encrypted rclone configuration File
RCLONE_ENCRYPT_V0:
b5Uk6mE3cUn5Wb8xiWYnVBAxXUirAaEG1PO/GIDiO9274AOæøå+Yj790BwJA4d2y7lNkmHt4nJwIsoueFvUYmm7RDyzER8IA3XOCrjzl3OUcczZqcplk5JfBdhxMZpt1aGYWUdle1IgO/kAFne6sLD6IuxPySEb

fs/testdata/enc-short.conf

@@ -0,0 +1,4 @@
# Encrypted rclone configuration File
RCLONE_ENCRYPT_V0:
b5Uk6mE3cUn5Wb8xi

fs/testdata/enc-too-new.conf

@@ -0,0 +1,4 @@
# Encrypted rclone configuration File
RCLONE_ENCRYPT_V1:
b5Uk6mE3cUn5Wb8xiWYnVBAxXUirAaEG1PO/GIDiO9274AO+Yj790BwJA4d2y7lNkmHt4nJwIsoueFvUYmm7RDyzER8IA3XOCrjzl3OUcczZqcplk5JfBdhxMZpt1aGYWUdle1IgO/kAFne6sLD6IuxPySEb

fs/testdata/encrypted.conf

@@ -0,0 +1,4 @@
# Encrypted rclone configuration File
RCLONE_ENCRYPT_V0:
b5Uk6mE3cUn5Wb8xiWYnVBAxXUirAaEG1PO/GIDiO9274AO+Yj790BwJA4d2y7lNkmHt4nJwIsoueFvUYmm7RDyzER8IA3XOCrjzl3OUcczZqcplk5JfBdhxMZpt1aGYWUdle1IgO/kAFne6sLD6IuxPySEb

fs/testdata/plain.conf

@@ -0,0 +1,12 @@
[RCLONE_ENCRYPT_V0]
type = local
nounc = true
[nounc]
type = local
nounc = true
[unc]
type = local
nounc = false


@@ -1,4 +1,4 @@
package fs
// Version of rclone
const Version = "v1.25"
const Version = "v1.29"


@@ -4,11 +4,14 @@ package fstest
// FIXME put name of test FS in Fs structure
import (
"bytes"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
@@ -16,6 +19,11 @@ import (
"github.com/ncw/rclone/fs"
)
var (
// MatchTestRemote matches the remote names used for testing
MatchTestRemote = regexp.MustCompile(`^rclone-test-[abcdefghijklmnopqrstuvwxyz0123456789]{24}$`)
)
// Seed the random number generator
func init() {
rand.Seed(time.Now().UnixNano())
@@ -25,12 +33,29 @@ func init() {
// Item represents an item for checking
type Item struct {
Path string
Md5sum string
Hashes map[fs.HashType]string
ModTime time.Time
Size int64
WinPath string
}
// NewItem creates an item from a string content
func NewItem(Path, Content string, modTime time.Time) Item {
i := Item{
Path: Path,
ModTime: modTime,
Size: int64(len(Content)),
}
hash := fs.NewMultiHasher()
buf := bytes.NewBufferString(Content)
_, err := io.Copy(hash, buf)
if err != nil {
log.Fatalf("Failed to create item: %v", err)
}
i.Hashes = hash.Sums()
return i
}
// CheckTimeEqualWithPrecision checks the times are equal within the
// precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
@@ -49,21 +74,29 @@ func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, prec
}
}
// Check checks all the attributes of the object are correct
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
// CheckHashes checks all the hashes the object supports are correct
func (i *Item) CheckHashes(t *testing.T, obj fs.Object) {
if obj == nil {
t.Fatalf("Object is nil")
}
// Check attributes
Md5sum, err := obj.Md5sum()
if err != nil {
t.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
}
if !fs.Md5sumsEqual(i.Md5sum, Md5sum) {
t.Errorf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
types := obj.Fs().Hashes().Array()
for _, hash := range types {
// Check attributes
sum, err := obj.Hash(hash)
if err != nil {
t.Fatalf("%s: Failed to read hash %v for %q: %v", obj.Fs().String(), hash, obj.Remote(), err)
}
if !fs.HashEquals(i.Hashes[hash], sum) {
t.Errorf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), hash, i.Hashes[hash], sum)
}
}
}
// Check checks all the attributes of the object are correct
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
i.CheckHashes(t, obj)
if i.Size != obj.Size() {
t.Errorf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
t.Errorf("%s/%s: Size incorrect - expecting %d got %d", obj.Fs().String(), obj.Remote(), i.Size, obj.Size())
}
i.CheckModTime(t, obj, obj.ModTime(), precision)
}
@@ -109,7 +142,7 @@ func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
func (is *Items) Done(t *testing.T) {
if len(is.byName) != 0 {
for name := range is.byName {
log.Printf("Not found %q", name)
t.Logf("Not found %q", name)
}
t.Errorf("%d objects not found", len(is.byName))
}
@@ -121,17 +154,26 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision ti
is := NewItems(items)
oldErrors := fs.Stats.GetErrors()
var objs []fs.Object
const retries = 10
const retries = 6
sleep := time.Second / 2
for i := 1; i <= retries; i++ {
objs = nil
for obj := range f.List() {
objs = append(objs, obj)
}
if len(objs) == len(items) {
// Put an extra sleep in if we did any retries just to make sure it really
// is consistent (here's looking at you, Amazon Cloud Drive!)
if i != 1 {
extraSleep := 5*time.Second + sleep
t.Logf("Sleeping for %v just to make sure", extraSleep)
time.Sleep(extraSleep)
}
break
}
t.Logf("Sleeping for 1 second for list eventual consistency: %d/%d", i, retries)
time.Sleep(1 * time.Second)
sleep *= 2
t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
time.Sleep(sleep)
}
for _, obj := range objs {
if obj == nil {
@@ -153,6 +195,12 @@ func CheckListing(t *testing.T, f fs.Fs, items []Item) {
CheckListingWithPrecision(t, f, items, precision)
}
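// Illustrative sketch - not rclone source - of the doubling backoff the
// listing retry loop in CheckListingWithPrecision above uses: the sleep
// starts at half a second and doubles before each retry.
package main

import (
	"fmt"
	"time"
)

func main() {
	sleep := time.Second / 2
	for try := 1; try <= 6; try++ {
		sleep *= 2
		fmt.Printf("retry %d/6 would sleep %v\n", try, sleep)
	}
}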
// CheckItems checks the fs to see if it has only the items passed in
// using a precision of fs.Config.ModifyWindow
func CheckItems(t *testing.T, f fs.Fs, items ...Item) {
CheckListingWithPrecision(t, f, items, fs.Config.ModifyWindow)
}
// Time parses a time string or logs a fatal error
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
@@ -164,9 +212,17 @@ func Time(timeString string) time.Time {
// RandomString create a random string for test purposes
func RandomString(n int) string {
source := "abcdefghijklmnopqrstuvwxyz0123456789"
const (
vowel = "aeiou"
consonant = "bcdfghjklmnpqrstvwxyz"
digit = "0123456789"
)
pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
out := make([]byte, n)
p := 0
for i := range out {
source := pattern[p]
p = (p + 1) % len(pattern)
out[i] = source[rand.Intn(len(source))]
}
return string(out)
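// Illustrative sketch - not rclone source - of the consonant/vowel cycling
// RandomString above uses, which makes test-remote names pronounceable
// (e.g. "bakovelu") and easy to spot in listings.
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	pattern := []string{"bcdfghjklmnpqrstvwxyz", "aeiou"}
	out := make([]byte, 8)
	for i := range out {
		source := pattern[i%len(pattern)]
		out[i] = source[rand.Intn(len(source))]
	}
	fmt.Println(string(out))
}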
@@ -200,7 +256,10 @@ func RandomRemoteName(remoteName string) (string, string, error) {
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
leafName = RandomString(32)
leafName = "rclone-test-" + RandomString(24)
if !MatchTestRemote.MatchString(leafName) {
log.Fatalf("%q didn't match the test remote name regexp", leafName)
}
remoteName += leafName
}
return remoteName, leafName, nil
@@ -224,7 +283,7 @@ func RandomRemote(remoteName string, subdir bool) (fs.Fs, func(), error) {
if err != nil {
return nil, nil, err
}
remoteName += "/" + RandomString(8)
remoteName += "/rclone-test-subdir-" + RandomString(8)
}
remote, err := fs.NewFs(remoteName)


@@ -7,8 +7,6 @@ package fstests
import (
"bytes"
"crypto/md5"
"encoding/hex"
"flag"
"io"
"log"
@@ -38,10 +36,13 @@ var (
Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`,
WinPath: `hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _/z.txt`,
}
verbose = flag.Bool("verbose", false, "Set to enable logging")
dumpHeaders = flag.Bool("dump-headers", false, "Dump HTTP headers - may contain sensitive info")
dumpBodies = flag.Bool("dump-bodies", false, "Dump HTTP headers and bodies - may contain sensitive info")
)
const eventualConsistencyRetries = 10
func init() {
flag.StringVar(&RemoteName, "remote", "", "Set this to override the default remote name (eg s3:)")
}
@@ -49,9 +50,14 @@ func init() {
// TestInit tests basic initialisation
func TestInit(t *testing.T) {
var err error
// Never ask for passwords, fail instead.
// If your local config is encrypted set environment variable
// "RCLONE_CONFIG_PASS=hunter2" (or your password)
*fs.AskPassword = false
fs.LoadConfig()
fs.Config.Verbose = false
fs.Config.Quiet = true
fs.Config.Verbose = *verbose
fs.Config.Quiet = !*verbose
fs.Config.DumpHeaders = *dumpHeaders
fs.Config.DumpBodies = *dumpBodies
t.Logf("Using remote %q", RemoteName)
@@ -137,7 +143,15 @@ func TestFsNewFsObjectNotFound(t *testing.T) {
}
func findObject(t *testing.T, Name string) fs.Object {
obj := remote.NewFsObject(Name)
var obj fs.Object
for i := 1; i <= eventualConsistencyRetries; i++ {
obj = remote.NewFsObject(Name)
if obj != nil {
break
}
t.Logf("Sleeping for 1 second for findObject eventual consistency: %d/%d", i, eventualConsistencyRetries)
time.Sleep(1 * time.Second)
}
if obj == nil {
t.Fatalf("Object not found: %q", Name)
}
@@ -146,15 +160,16 @@ func findObject(t *testing.T, Name string) fs.Object {
func testPut(t *testing.T, file *fstest.Item) {
buf := bytes.NewBufferString(fstest.RandomString(100))
hash := md5.New()
hash := fs.NewMultiHasher()
in := io.TeeReader(buf, hash)
file.Size = int64(buf.Len())
obj, err := remote.Put(in, file.Path, file.ModTime, file.Size)
obji := fs.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
obj, err := remote.Put(in, obji)
if err != nil {
t.Fatal("Put error", err)
}
file.Md5sum = hex.EncodeToString(hash.Sum(nil))
file.Hashes = hash.Sums()
file.Check(t, obj, remote.Precision())
// Re-read the object and check again
obj = findObject(t, file.Path)
@@ -177,12 +192,19 @@ func TestFsPutFile2(t *testing.T) {
func TestFsListDirFile2(t *testing.T) {
skipIfNotOk(t)
found := false
for obj := range remote.ListDir() {
if obj.Name != `hello? sausage` && obj.Name != `hello_ sausage` {
t.Errorf("Found unexpected item %q", obj.Name)
} else {
found = true
for i := 1; i <= eventualConsistencyRetries; i++ {
for obj := range remote.ListDir() {
if obj.Name != `hello? sausage` && obj.Name != `hello_ sausage` {
t.Errorf("Found unexpected item %q", obj.Name)
} else {
found = true
}
}
if found {
break
}
t.Logf("Sleeping for 1 second for TestFsListDirFile2 eventual consistency: %d/%d", i, eventualConsistencyRetries)
time.Sleep(1 * time.Second)
}
if !found {
t.Errorf("Didn't find %q", `hello? sausage`)
@@ -289,7 +311,7 @@ func TestFsCopy(t *testing.T) {
// check file exists in new listing
fstest.CheckListing(t, remote, []fstest.Item{file1, file2, file1Copy})
// Check dst lightly - list above has checked ModTime/Md5sum
// Check dst lightly - list above has checked ModTime/Hashes
if dst.Remote() != file1Copy.Path {
t.Errorf("object path: want %q got %q", file1Copy.Path, dst.Remote())
}
@@ -325,7 +347,7 @@ func TestFsMove(t *testing.T) {
// check file exists in new listing
fstest.CheckListing(t, remote, []fstest.Item{file2, file1Move})
// Check dst lightly - list above has checked ModTime/Md5sum
// Check dst lightly - list above has checked ModTime/Hashes
if dst.Remote() != file1Move.Path {
t.Errorf("object path: want %q got %q", file1Move.Path, dst.Remote())
}
@@ -456,17 +478,11 @@ func TestObjectRemote(t *testing.T) {
}
}
// TestObjectMd5sum tests the MD5SUM of the object is correct
func TestObjectMd5sum(t *testing.T) {
// TestObjectHashes checks all the hashes the object supports
func TestObjectHashes(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
Md5sum, err := obj.Md5sum()
if err != nil {
t.Errorf("Error in Md5sum: %v", err)
}
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
file1.CheckHashes(t, obj)
}
// TestObjectModTime tests the ModTime of the object is correct
@@ -481,7 +497,13 @@ func TestObjectSetModTime(t *testing.T) {
skipIfNotOk(t)
newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z")
obj := findObject(t, file1.Path)
obj.SetModTime(newModTime)
err := obj.SetModTime(newModTime)
if err == fs.ErrorCantSetModTime {
t.Log(err)
return
} else if err != nil {
t.Fatal(err)
}
file1.ModTime = newModTime
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
// And make a new object and read it from there too
@@ -505,8 +527,8 @@ func TestObjectOpen(t *testing.T) {
if err != nil {
t.Fatalf("Open() return error: %v", err)
}
hash := md5.New()
n, err := io.Copy(hash, in)
hasher := fs.NewMultiHasher()
n, err := io.Copy(hasher, in)
if err != nil {
t.Fatalf("io.Copy() return error: %v", err)
}
@@ -517,26 +539,31 @@ func TestObjectOpen(t *testing.T) {
if err != nil {
t.Fatalf("in.Close() return error: %v", err)
}
Md5sum := hex.EncodeToString(hash.Sum(nil))
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
// Check content of file by comparing the calculated hashes
for hashType, got := range hasher.Sums() {
want := file1.Hashes[hashType]
if want != got {
t.Errorf("%v is wrong %v != %v", hashType, want, got)
}
}
}
// TestObjectUpdate tests that Update works
func TestObjectUpdate(t *testing.T) {
skipIfNotOk(t)
buf := bytes.NewBufferString(fstest.RandomString(200))
hash := md5.New()
hash := fs.NewMultiHasher()
in := io.TeeReader(buf, hash)
file1.Size = int64(buf.Len())
obj := findObject(t, file1.Path)
err := obj.Update(in, file1.ModTime, file1.Size)
obji := fs.NewStaticObjectInfo("", file1.ModTime, file1.Size, true, nil, obj.Fs())
err := obj.Update(in, obji)
if err != nil {
t.Fatal("Update error", err)
}
file1.Md5sum = hex.EncodeToString(hash.Sum(nil))
file1.Hashes = hash.Sums()
file1.Check(t, obj, remote.Precision())
// Re-read the object and check again
obj = findObject(t, file1.Path)


@@ -133,5 +133,7 @@ func main() {
generateTestProgram(t, fns, "AmazonCloudDrive")
generateTestProgram(t, fns, "OneDrive")
generateTestProgram(t, fns, "Hubic")
generateTestProgram(t, fns, "B2")
generateTestProgram(t, fns, "Yandex")
log.Printf("Done")
}


@@ -34,12 +34,12 @@ import (
)
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "8p/yms3OlNXE9OTDl/HLypf9gdiJ5cT3"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 256 // chunk size to read directory listings
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "8p/yms3OlNXE9OTDl/HLypf9gdiJ5cT3"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 256 // chunk size to read directory listings
)
var (
@@ -48,27 +48,28 @@ var (
Scopes: []string{storage.DevstorageFullControlScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: fs.Reveal(rcloneClientSecret),
ClientSecret: fs.Reveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "google cloud storage",
NewFs: NewFs,
fs.Register(&fs.RegInfo{
Name: "google cloud storage",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config(name, storageConfig)
err := oauthutil.Config("google cloud storage", name, storageConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: oauthutil.ConfigClientID,
Name: fs.ConfigClientID,
Help: "Google Application Client Id - leave blank normally.",
}, {
Name: oauthutil.ConfigClientSecret,
Name: fs.ConfigClientSecret,
Help: "Google Application Client Secret - leave blank normally.",
}, {
Name: "project_number",
@@ -379,13 +380,13 @@ func (f *Fs) ListDir() fs.DirChan {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
remote: src.Remote(),
}
return o, o.Update(in, modTime, size)
return o, o.Update(in, src)
}
// Mkdir creates the bucket if it doesn't exist
@@ -458,10 +459,15 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
return dstObj, nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
func (o *Object) Fs() fs.Info {
return o.fs
}
@@ -478,8 +484,11 @@ func (o *Object) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
}
return o.md5sum, nil
}
@@ -558,7 +567,7 @@ func metadataFromModTime(modTime time.Time) map[string]string {
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) error {
// This only adds metadata so will preserve other metadata
object := storage.Object{
Bucket: o.fs.bucket,
@@ -567,10 +576,10 @@ func (o *Object) SetModTime(modTime time.Time) {
}
newObject, err := o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
return err
}
o.setMetaData(newObject)
return nil
}
// Storable returns a boolean as to whether this object is storable
@@ -609,7 +618,10 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
size := src.Size()
modTime := src.ModTime()
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,


@@ -42,7 +42,7 @@ func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }


@@ -9,7 +9,6 @@ package hubic
import (
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"time"
@@ -22,8 +21,8 @@ import (
)
const (
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
rcloneClientSecret = "8MrG3pjWyJya4OnO9ZTS4emI+9fa1ouPgvfD2MbTzfDYvO/H5czFxsTXtcji4/Hz3snz8/CrzMzlxvP9//Ty/Q=="
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
rcloneEncryptedClientSecret = "8MrG3pjWyJya4OnO9ZTS4emI+9fa1ouPgvfD2MbTzfDYvO/H5czFxsTXtcji4/Hz3snz8/CrzMzlxvP9//Ty/Q=="
)
// Globals
@@ -38,27 +37,28 @@ var (
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
ClientSecret: fs.Reveal(rcloneClientSecret),
ClientSecret: fs.Reveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "hubic",
NewFs: NewFs,
fs.Register(&fs.RegInfo{
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config(name, oauthConfig)
err := oauthutil.Config("hubic", name, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: oauthutil.ConfigClientID,
Name: fs.ConfigClientID,
Help: "Hubic Client Id - leave blank normally.",
}, {
Name: oauthutil.ConfigClientSecret,
Name: fs.ConfigClientSecret,
Help: "Hubic Client Secret - leave blank normally.",
}},
})
@@ -103,15 +103,6 @@ func (f *Fs) String() string {
return fmt.Sprintf("Hubic %s", f.Fs.String())
}
// checkClose is a utility function used to check the return from
// Close in a defer statement.
func checkClose(c io.Closer, err *error) {
cerr := c.Close()
if *err == nil {
*err = cerr
}
}
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
@@ -125,7 +116,7 @@ func (f *Fs) getCredentials() (err error) {
if err != nil {
return err
}
defer checkClose(resp.Body, &err)
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
return fmt.Errorf("Failed to get credentials: %s", resp.Status)
}
@@ -217,6 +208,12 @@ func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// Hashes returns the supported hash sets.
// Inherited from swift
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)


@@ -42,7 +42,7 @@ func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }


@@ -2,10 +2,7 @@
package local
import (
"crypto/md5"
"encoding/hex"
"fmt"
"hash"
"io"
"io/ioutil"
"os"
@@ -22,10 +19,21 @@ import (
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "local",
NewFs: NewFs,
})
fsi := &fs.RegInfo{
Name: "local",
Description: "Local Disk",
NewFs: NewFs,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Optional: true,
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names",
}},
}},
}
fs.Register(fsi)
}
// Fs represents a local filesystem rooted at root
@@ -34,16 +42,18 @@ type Fs struct {
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
wmu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
nounc bool // Skip UNC conversion on Windows
}
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path
path string // The local path
info os.FileInfo // Interface for file info (always present)
md5sum string // the md5sum of the object or "" if not calculated
fs *Fs // The Fs this object is part of
remote string // The remote path
path string // The local path
info os.FileInfo // Interface for file info (always present)
hashes map[fs.HashType]string // Hashes
}
// ------------------------------------------------------------
@@ -52,11 +62,13 @@ type Object struct {
func NewFs(name, root string) (fs.Fs, error) {
var err error
nounc, _ := fs.ConfigFile.GetValue(name, "nounc")
f := &Fs{
name: name,
warned: make(map[string]struct{}),
nounc: nounc == "true",
}
f.root = filterPath(f.cleanUtf8(root))
f.root = f.filterPath(f.cleanUtf8(root))
// Check to see if this points to a file
fi, err := os.Lstat(f.root)
@@ -89,7 +101,7 @@ func (f *Fs) String() string {
// newFsObject makes a half completed Object
func (f *Fs) newFsObject(remote string) *Object {
remote = filepath.ToSlash(remote)
dstPath := filterPath(filepath.Join(f.root, f.cleanUtf8(remote)))
dstPath := f.filterPath(filepath.Join(f.root, f.cleanUtf8(remote)))
return &Object{
fs: f,
remote: remote,
@@ -164,10 +176,12 @@ func (f *Fs) List() fs.ObjectsChan {
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
func (f *Fs) cleanUtf8(name string) string {
if !utf8.ValidString(name) {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
name = string([]rune(name))
}
if runtime.GOOS == "windows" {
@@ -195,7 +209,7 @@ func (f *Fs) ListDir() fs.DirChan {
Count: 0,
}
// Go down the tree to count the files and directories
dirpath := filterPath(filepath.Join(f.root, item.Name()))
dirpath := f.filterPath(filepath.Join(f.root, item.Name()))
err := filepath.Walk(dirpath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
@@ -220,10 +234,11 @@ func (f *Fs) ListDir() fs.DirChan {
}
// Put the FsObject to the local filesystem
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
remote := src.Remote()
// Temporary FsObject under construction - info filled in by Update()
o := f.newFsObject(remote)
err := o.Update(in, modTime, size)
err := o.Update(in, src)
if err != nil {
return nil, err
}
@@ -404,10 +419,15 @@ func (f *Fs) DirMove(src fs.Fs) error {
return os.Rename(srcFs.root, f.root)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.SupportedHashes
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
func (o *Object) Fs() fs.Info {
return o.fs
}
@@ -424,32 +444,44 @@ func (o *Object) Remote() string {
return o.fs.cleanUtf8(o.remote)
}
// Md5sum calculates the Md5sum of a file returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
if o.md5sum != "" {
return o.md5sum, nil
}
in, err := os.Open(o.path)
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(r fs.HashType) (string, error) {
// Check that the underlying file hasn't changed
oldtime := o.info.ModTime()
oldsize := o.info.Size()
err := o.lstat()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to open: %s", err)
fs.ErrorLog(o, "Failed to stat: %s", err)
return "", err
}
hash := md5.New()
_, err = io.Copy(hash, in)
closeErr := in.Close()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to read: %s", err)
return "", err
if !o.info.ModTime().Equal(oldtime) || oldsize != o.info.Size() {
o.hashes = nil
}
if closeErr != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to close: %s", closeErr)
return "", closeErr
if o.hashes == nil {
o.hashes = make(map[fs.HashType]string)
in, err := os.Open(o.path)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to open: %s", err)
return "", err
}
o.hashes, err = fs.HashStream(in)
closeErr := in.Close()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to read: %s", err)
return "", err
}
if closeErr != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to close: %s", closeErr)
return "", closeErr
}
}
o.md5sum = hex.EncodeToString(hash.Sum(nil))
return o.md5sum, nil
return o.hashes[r], nil
}
// Size returns the size of an object in bytes
@@ -463,18 +495,13 @@ func (o *Object) ModTime() time.Time {
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) error {
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
fs.Debug(o, "Failed to set mtime on file: %s", err)
return
return err
}
// Re-read metadata
err = o.lstat()
if err != nil {
fs.Debug(o, "Failed to stat: %s", err)
return
}
return o.lstat()
}
// Storable returns a boolean showing if this object is storable
@@ -493,9 +520,9 @@ func (o *Object) Storable() bool {
// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
// object that is read
type localOpenFile struct {
o *Object // object that is open
in io.ReadCloser // handle we are wrapping
hash hash.Hash // currently accumulating MD5
o *Object // object that is open
in io.ReadCloser // handle we are wrapping
hash *fs.MultiHasher // currently accumulating hashes
}
// Read bytes from the object - see io.Reader
@@ -512,9 +539,9 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
func (file *localOpenFile) Close() (err error) {
err = file.in.Close()
if err == nil {
file.o.md5sum = hex.EncodeToString(file.hash.Sum(nil))
file.o.hashes = file.hash.Sums()
} else {
file.o.md5sum = ""
file.o.hashes = nil
}
return err
}
@@ -529,7 +556,7 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
in = &localOpenFile{
o: o,
in: in,
hash: md5.New(),
hash: fs.NewMultiHasher(),
}
return
}
@@ -541,7 +568,7 @@ func (o *Object) mkdirAll() error {
}
// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
err := o.mkdirAll()
if err != nil {
return err
@@ -552,8 +579,8 @@ func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
return err
}
// Calculate the md5sum of the object we are reading as we go along
hash := md5.New()
// Calculate the hash of the object we are reading as we go along
hash := fs.NewMultiHasher()
in = io.TeeReader(in, hash)
_, err = io.Copy(out, in)
@@ -565,11 +592,14 @@ func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
return outErr
}
// All successful so update the md5sum
o.md5sum = hex.EncodeToString(hash.Sum(nil))
// All successful so update the hashes
o.hashes = hash.Sums()
// Set the mtime
o.SetModTime(modTime)
err = o.SetModTime(src.ModTime())
if err != nil {
return err
}
// ReRead info now that we have finished
return o.lstat()
@@ -594,7 +624,7 @@ func getDirFile(s string) (string, string) {
return s[:i], s[i+1:]
}
func filterPath(s string) string {
func (f *Fs) filterPath(s string) string {
s = filepath.Clean(s)
if runtime.GOOS == "windows" {
s = strings.Replace(s, `/`, `\`, -1)
@@ -606,6 +636,9 @@ func filterPath(s string) string {
}
}
if f.nounc {
return s
}
// Convert to UNC
return uncPath(s)
}
@@ -674,10 +707,12 @@ func cleanWindowsName(f *Fs, name string) string {
}, name)
if name2 != original && f != nil {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Debug(f, "Replacing invalid characters in %q to %q", name, name2)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
}
return name2
}


@@ -42,7 +42,7 @@ func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }


@@ -52,9 +52,9 @@ func TestUncPaths(t *testing.T) {
}
var utf8Tests = [][2]string{
[2]string{"ABC", "ABC"},
[2]string{string([]byte{0x80}), "�"},
[2]string{string([]byte{'a', 0x80, 'b'}), "a�b"},
{"ABC", "ABC"},
{string([]byte{0x80}), "�"},
{string([]byte{'a', 0x80, 'b'}), "a�b"},
}
func TestCleanUtf8(t *testing.T) {
@@ -71,13 +71,13 @@ func TestCleanUtf8(t *testing.T) {
// Test Windows character replacements
var testsWindows = [][2]string{
[2]string{`c:\temp`, `c:\temp`},
[2]string{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
[2]string{`//?/UNC/theserver/dir\file.txt`, `//?/UNC/theserver/dir\file.txt`},
[2]string{"c:/temp", "c:/temp"},
[2]string{"/temp/file.txt", "/temp/file.txt"},
[2]string{`!\"#¤%&/()=;:*^?+-`, "!\\_#¤%&/()=;__^_+-"},
[2]string{`<>"|?*:&\<>"|?*:&\<>"|?*:&`, "_______&\\_______&\\_______&"},
{`c:\temp`, `c:\temp`},
{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`//?/UNC/theserver/dir\file.txt`, `//?/UNC/theserver/dir\file.txt`},
{"c:/temp", "c:/temp"},
{"/temp/file.txt", "/temp/file.txt"},
{`!\"#¤%&/()=;:*^?+-`, "!\\_#¤%&/()=;__^_+-"},
{`<>"|?*:&\<>"|?*:&\<>"|?*:&`, "_______&\\_______&\\_______&"},
}
func TestCleanWindows(t *testing.T) {


@@ -16,6 +16,7 @@ docs = [
"about.md",
"install.md",
"docs.md",
"remote_setup.md",
"filtering.md",
"overview.md",
"drive.md",
@@ -26,6 +27,8 @@ docs = [
"amazonclouddrive.md",
"onedrive.md",
"hubic.md",
"b2.md",
"yandex.md",
"local.md",
"changelog.md",
"bugs.md",


@@ -1,51 +1,20 @@
Perhaps make Md5sum() and Modtime() optional. Define the zero values
"" and 0. Make it so we can support remotes which can't do those.
Fix the docs
* factor the README.md into the docs directory
* create it as part of make by assembling other parts
* write long docs about each flag
Change lsd command so it doesn't show -1
* Make sure all Fses show -1 for objects, zero for dates etc
* Make test?
Put the TestRemote names into the Fs description
Make test_all.sh use the TestRemote name automatically
Run errcheck and go vet in the make file
.. Also race detector?
.. go tool vet -shadow
Make fs/test_all.go use the TestRemote name automatically
Get rid of Storable?
Write developer manual
Todo
* FIXME: More -dry-run checks for object transfer
* Might be quicker to check md5sums first? for swift <-> swift certainly, and maybe for small files
* swift: Ignoring the pseudo directories
* if object.PseudoDirectory {
* fmt.Printf("%9s %19s %s\n", "Directory", "-", fs.Remote())
* Make Account wrapper
* make Account do progress meter
* -timeout: Make all timeouts be settable with command line parameters
* Add max object size to fs metadata - 5GB for swift, infinite for local, ? for s3
* tie into -max-size flag
* FIXME Make NewFs to return err.IsAnObject so can put the LimitedFs
creation in common code? Or try for as much as possible?
* FIXME Account all the transactions (ls etc) using a different
Roundtripper wrapper which wraps the transactions?
More rsync features
* include
* exclude
* max size
* -c, --checksum skip based on checksum, not mod-time & size
Ideas for flags
* --retries N flag which would make rclone retry a sync until successful or it tried N times.
Ideas
* could do encryption - put IV into metadata?
* optimise remote copy container to another container using remote


@@ -7,6 +7,7 @@ import (
"log"
"net"
"net/http"
"strings"
"time"
"github.com/ncw/rclone/fs"
@@ -16,15 +17,6 @@ import (
)
const (
// ConfigToken is the key used to store the token under
ConfigToken = "token"
// ConfigClientID is the config key used to store the client id
ConfigClientID = "client_id"
// ConfigClientSecret is the config key used to store the client secret
ConfigClientSecret = "client_secret"
// TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization
// code should be returned in the title bar of the browser, with the page text
// prompting the user to copy the code and paste it in the application.
@@ -60,7 +52,7 @@ type oldToken struct {
// getToken returns the token saved in the config file under
// section name.
func getToken(name string) (*oauth2.Token, error) {
tokenString, err := fs.ConfigFile.GetValue(string(name), ConfigToken)
tokenString, err := fs.ConfigFile.GetValue(string(name), fs.ConfigToken)
if err != nil {
return nil, err
}
@@ -103,9 +95,9 @@ func putToken(name string, token *oauth2.Token) error {
return err
}
tokenString := string(tokenBytes)
old := fs.ConfigFile.MustValue(name, ConfigToken)
old := fs.ConfigFile.MustValue(name, fs.ConfigToken)
if tokenString != old {
fs.ConfigFile.SetValue(name, ConfigToken, tokenString)
fs.ConfigFile.SetValue(name, fs.ConfigToken, tokenString)
fs.SaveConfig()
fs.Debug(name, "Saving new token in config file")
}
@@ -147,16 +139,21 @@ func Context() context.Context {
}
// overrideCredentials sets the ClientID and ClientSecret from the
// config file if they are not blank
func overrideCredentials(name string, config *oauth2.Config) {
ClientID := fs.ConfigFile.MustValue(name, ConfigClientID)
// config file if they are not blank.
// If any value is overridden, true is returned.
func overrideCredentials(name string, config *oauth2.Config) bool {
changed := false
ClientID := fs.ConfigFile.MustValue(name, fs.ConfigClientID)
if ClientID != "" {
config.ClientID = ClientID
changed = true
}
ClientSecret := fs.ConfigFile.MustValue(name, ConfigClientSecret)
ClientSecret := fs.ConfigFile.MustValue(name, fs.ConfigClientSecret)
if ClientSecret != "" {
config.ClientSecret = ClientSecret
changed = true
}
return changed
}
// NewClient gets a token from the config file and configures a Client
@@ -185,8 +182,10 @@ func NewClient(name string, config *oauth2.Config) (*http.Client, error) {
// Config does the initial creation of the token
//
// It may run an internal webserver to receive the results
func Config(name string, config *oauth2.Config) error {
overrideCredentials(name, config)
func Config(id, name string, config *oauth2.Config) error {
changed := overrideCredentials(name, config)
automatic := fs.ConfigFile.MustValue(name, fs.ConfigAutomatic) != ""
// See if already have a token
tokenString := fs.ConfigFile.MustValue(name, "token")
if tokenString != "" {
@@ -201,11 +200,42 @@ func Config(name string, config *oauth2.Config) error {
switch config.RedirectURL {
case RedirectURL, RedirectPublicURL, RedirectLocalhostURL:
useWebServer = true
case TitleBarRedirectURL:
if automatic {
break
}
fmt.Printf("Use auto config?\n")
fmt.Printf(" * Say Y if not sure\n")
fmt.Printf(" * Say N if you are working on a remote or headless machine or Y didn't work\n")
useWebServer = fs.Confirm()
fmt.Printf(" * Say N if you are working on a remote or headless machine\n")
auto := fs.Confirm()
if !auto {
fmt.Printf("For this to work, you will need rclone available on a machine that has a web browser available.\n")
fmt.Printf("Execute the following on your machine:\n")
if changed {
fmt.Printf("\trclone authorize %q %q %q\n", id, config.ClientID, config.ClientSecret)
} else {
fmt.Printf("\trclone authorize %q\n", id)
}
fmt.Println("Then paste the result below:")
code := ""
for code == "" {
fmt.Printf("result> ")
code = strings.TrimSpace(fs.ReadLine())
}
token := &oauth2.Token{}
err := json.Unmarshal([]byte(code), token)
if err != nil {
return err
}
return putToken(name, token)
}
case TitleBarRedirectURL:
useWebServer = automatic
if !automatic {
fmt.Printf("Use auto config?\n")
fmt.Printf(" * Say Y if not sure\n")
fmt.Printf(" * Say N if you are working on a remote or headless machine or Y didn't work\n")
useWebServer = fs.Confirm()
}
if useWebServer {
// copy the config and set to use the internal webserver
configCopy := *config
@@ -260,6 +290,15 @@ func Config(name string, config *oauth2.Config) error {
if err != nil {
return fmt.Errorf("Failed to get token: %v", err)
}
// Print code if we do automatic retrieval
if automatic {
result, err := json.Marshal(token)
if err != nil {
return fmt.Errorf("Failed to marshal token: %v", err)
}
fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste", result)
}
return putToken(name, token)
}
@@ -280,15 +319,17 @@ func (s *authServer) Start() {
Addr: s.bindAddress,
Handler: mux,
}
server.SetKeepAlivesEnabled(false)
mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, req *http.Request) {
http.Error(w, "", 404)
return
})
mux.HandleFunc("/auth", func(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, s.authURL, 307)
http.Redirect(w, req, s.authURL, http.StatusTemporaryRedirect)
return
})
mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "text/html")
fs.Debug(nil, "Received request on auth server")
code := req.FormValue("code")
if code != "" {
@@ -308,8 +349,9 @@ func (s *authServer) Start() {
return
}
fs.Debug(nil, "No code found on request")
fmt.Fprintf(w, "<h1>Failed!</h1>\nNo code found.")
http.Error(w, "", 500)
w.WriteHeader(500)
fmt.Fprintf(w, "<h1>Failed!</h1>\nNo code found returned by remote server.")
})
var err error


@@ -18,16 +18,18 @@ import (
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/onedrive/api"
"github.com/ncw/rclone/pacer"
"github.com/ncw/rclone/rest"
"github.com/spf13/pflag"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "0000000044165769"
rcloneClientSecret = "0+be4+jYw+7018HY6P3t/Izo+pTc+Yvt8+fy8NHU094="
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
rcloneClientID = "0000000044165769"
rcloneEncryptedClientSecret = "0+be4+jYw+7018HY6P3t/Izo+pTc+Yvt8+fy8NHU094="
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api.onedrive.com/v1.0" // root URL for requests
)
// Globals
@@ -44,7 +46,7 @@ var (
TokenURL: "https://login.live.com/oauth20_token.srf",
},
ClientID: rcloneClientID,
ClientSecret: fs.Reveal(rcloneClientSecret),
ClientSecret: fs.Reveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectPublicURL,
}
chunkSize = fs.SizeSuffix(10 * 1024 * 1024)
@@ -53,20 +55,21 @@ var (
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "onedrive",
NewFs: NewFs,
fs.Register(&fs.RegInfo{
Name: "onedrive",
Description: "Microsoft OneDrive",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.Config(name, oauthConfig)
err := oauthutil.Config("onedrive", name, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: oauthutil.ConfigClientID,
Name: fs.ConfigClientID,
Help: "Microsoft App Client Id - leave blank normally.",
}, {
Name: oauthutil.ConfigClientSecret,
Name: fs.ConfigClientSecret,
Help: "Microsoft App Client Secret - leave blank normally.",
}},
})
@@ -77,7 +80,7 @@ func init() {
// Fs represents a remote one drive
type Fs struct {
name string // name of this remote
srv *api.Client // the connection to the one drive server
srv *rest.Client // the connection to the one drive server
root string // the path we are working on
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // pacer for API calls
@@ -93,6 +96,7 @@ type Object struct {
size int64 // size of the object
modTime time.Time // modification time of the object
id string // ID of the object
sha1 string // SHA-1 of the object content
}
// ------------------------------------------------------------
@@ -139,7 +143,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
opts := api.Opts{
opts := rest.Opts{
Method: "GET",
Path: "/drive/root:/" + replaceReservedChars(path),
}
@@ -150,6 +154,20 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon
return info, resp, err
}
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
// Decode error response
errResponse := new(api.Error)
err := rest.DecodeJSON(resp, &errResponse)
if err != nil {
fs.Debug(nil, "Couldn't decode error response: %v", err)
}
if errResponse.ErrorInfo.Code == "" {
errResponse.ErrorInfo.Code = resp.Status
}
return errResponse
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
root = parsePath(root)
@@ -161,9 +179,10 @@ func NewFs(name, root string) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
srv: api.NewClient(oAuthClient),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.srv.SetErrorHandler(errorHandler)
// Get rootID
rootInfo, _, err := f.readMetaDataForPath("")
@@ -266,7 +285,7 @@ func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
// fs.Debug(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
var info *api.Item
opts := api.Opts{
opts := rest.Opts{
Method: "POST",
Path: "/drive/items/" + pathID + "/children",
}
@@ -300,7 +319,7 @@ type listAllFn func(*api.Item) bool
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := api.Opts{
opts := rest.Opts{
Method: "GET",
Path: "/drive/items/" + dirID + "/children?top=1000",
}
@@ -469,12 +488,16 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
modTime := src.ModTime()
o, _, _, err := f.createObject(remote, modTime, size)
if err != nil {
return nil, err
}
return o, o.Update(in, modTime, size)
return o, o.Update(in, src)
}
// Mkdir creates the container if it doesn't exist
@@ -484,7 +507,7 @@ func (f *Fs) Mkdir() error {
// deleteObject removes an object by ID
func (f *Fs) deleteObject(id string) error {
opts := api.Opts{
opts := rest.Opts{
Method: "DELETE",
Path: "/drive/items/" + id,
NoResponse: true,
@@ -544,7 +567,7 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) waitForJob(location string, o *Object) error {
deadline := time.Now().Add(fs.Config.Timeout)
for time.Now().Before(deadline) {
opts := api.Opts{
opts := rest.Opts{
Method: "GET",
Path: location,
Absolute: true,
@@ -560,7 +583,7 @@ func (f *Fs) waitForJob(location string, o *Object) error {
}
if resp.StatusCode == 202 {
var status api.AsyncOperationStatus
err = api.DecodeJSON(resp, &status)
err = rest.DecodeJSON(resp, &status)
if err != nil {
return err
}
@@ -569,7 +592,7 @@ func (f *Fs) waitForJob(location string, o *Object) error {
}
} else {
var info api.Item
err = api.DecodeJSON(resp, &info)
err = rest.DecodeJSON(resp, &info)
if err != nil {
return err
}
@@ -590,7 +613,7 @@ func (f *Fs) waitForJob(location string, o *Object) error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) FIXMEDisabledCopy(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
@@ -608,7 +631,7 @@ func (f *Fs) FIXMEDisabledCopy(src fs.Object, remote string) (fs.Object, error)
}
// Copy the object
opts := api.Opts{
opts := rest.Opts{
Method: "POST",
Path: "/drive/items/" + srcObj.id + "/action.copy",
ExtraHeaders: map[string]string{"Prefer": "respond-async"},
@@ -653,10 +676,15 @@ func (f *Fs) Purge() error {
return f.purgeCheck(false)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashSHA1)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
func (o *Object) Fs() fs.Info {
return o.fs
}
@@ -678,9 +706,12 @@ func (o *Object) srvPath() string {
return replaceReservedChars(o.fs.rootSlash() + o.remote)
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
return "", nil // not supported by one drive
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashSHA1 {
return "", fs.ErrHashUnsupported
}
return o.sha1, nil
}
// Size returns the size of an object in bytes
@@ -697,6 +728,17 @@ func (o *Object) Size() int64 {
func (o *Object) setMetaData(info *api.Item) {
o.hasMetaData = true
o.size = info.Size
// Docs: https://dev.onedrive.com/facets/hashes_facet.htm
//
// The docs state both that the hashes are returned as hex
// strings, and as base64 strings. Testing reveals they are in
// fact uppercase hex strings.
//
// In OneDrive for Business, SHA1 and CRC32 hash values are not returned for files.
if info.File != nil && info.File.Hashes.Sha1Hash != "" {
o.sha1 = strings.ToLower(info.File.Hashes.Sha1Hash)
}
if info.FileSystemInfo != nil {
o.modTime = time.Time(info.FileSystemInfo.LastModifiedDateTime)
} else {
@@ -741,7 +783,7 @@ func (o *Object) ModTime() time.Time {
// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
opts := api.Opts{
opts := rest.Opts{
Method: "PATCH",
Path: "/drive/root:/" + o.srvPath(),
}
@@ -760,13 +802,13 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) error {
info, err := o.setModTime(modTime)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to update remote mtime: %v", err)
return err
}
o.setMetaData(info)
return nil
}
// Storable returns a boolean showing whether this object is storable
@@ -780,7 +822,7 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
return nil, fmt.Errorf("Can't download no id")
}
var resp *http.Response
opts := api.Opts{
opts := rest.Opts{
Method: "GET",
Path: "/drive/items/" + o.id + "/content",
}
@@ -796,7 +838,7 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession() (response *api.CreateUploadResponse, err error) {
opts := api.Opts{
opts := rest.Opts{
Method: "POST",
Path: "/drive/root:/" + o.srvPath() + ":/upload.createSession",
}
@@ -811,7 +853,7 @@ func (o *Object) createUploadSession() (response *api.CreateUploadResponse, err
// uploadFragment uploads a part
func (o *Object) uploadFragment(url string, start int64, totalSize int64, buf []byte) (err error) {
bufSize := int64(len(buf))
opts := api.Opts{
opts := rest.Opts{
Method: "PUT",
Path: url,
Absolute: true,
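For context, each uploadFragment call carries a Content-Range header of the form "bytes start-end/total", as the OneDrive upload session protocol expects. A sketch of building that value (the helper name is hypothetical):

package main

import "fmt"

// contentRange builds the Content-Range value for one fragment:
// contentRange(0, 10, 100) -> "bytes 0-9/100" (the first 10 of 100 bytes).
func contentRange(start, bufSize, totalSize int64) string {
	return fmt.Sprintf("bytes %d-%d/%d", start, start+bufSize-1, totalSize)
}

func main() {
	fmt.Println(contentRange(0, 10, 100))  // bytes 0-9/100
	fmt.Println(contentRange(90, 10, 100)) // bytes 90-99/100
}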
@@ -830,7 +872,7 @@ func (o *Object) uploadFragment(url string, start int64, totalSize int64, buf []
// cancelUploadSession cancels an upload session
func (o *Object) cancelUploadSession(url string) (err error) {
opts := api.Opts{
opts := rest.Opts{
Method: "DELETE",
Path: url,
Absolute: true,
@@ -898,12 +940,15 @@ func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) (err error) {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) {
size := src.Size()
modTime := src.ModTime()
var info *api.Item
if size <= int64(uploadCutoff) {
// This is for less than 100 MB of content
var resp *http.Response
opts := api.Opts{
opts := rest.Opts{
Method: "PUT",
Path: "/drive/root:/" + o.srvPath() + ":/content",
Body: in,
@@ -940,7 +985,7 @@ func (o *Object) Remove() error {
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
// FIXME _ fs.Copier = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
// _ fs.Mover = (*Fs)(nil)
// _ fs.DirMover = (*Fs)(nil)
_ fs.Object = (*Object)(nil)


@@ -42,7 +42,7 @@ func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }


@@ -15,6 +15,7 @@ type Pacer struct {
minSleep time.Duration // minimum sleep time
maxSleep time.Duration // maximum sleep time
decayConstant uint // decay constant
attackConstant uint // attack constant
pacer chan struct{} // To pace the operations
sleepTime time.Duration // Time to sleep for each transaction
retries int // Max number of retries
@@ -58,11 +59,12 @@ type Paced func() (bool, error)
// New returns a Pacer with sensible defaults
func New() *Pacer {
p := &Pacer{
minSleep: 10 * time.Millisecond,
maxSleep: 2 * time.Second,
decayConstant: 2,
retries: 10,
pacer: make(chan struct{}, 1),
minSleep: 10 * time.Millisecond,
maxSleep: 2 * time.Second,
decayConstant: 2,
attackConstant: 1,
retries: fs.Config.LowLevelRetries,
pacer: make(chan struct{}, 1),
}
p.sleepTime = p.minSleep
p.SetPacer(DefaultPacer)
@@ -116,7 +118,7 @@ func (p *Pacer) SetMaxConnections(n int) *Pacer {
// This is the speed the time falls back to the minimum after errors
// have occurred.
//
// bigger for slower decay, exponential
// bigger for slower decay, exponential. 1 is halve, 0 is go straight to minimum
func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
@@ -124,6 +126,19 @@ func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
return p
}
// SetAttackConstant sets the attack constant for the pacer
//
// This is the speed the time grows from the minimum after errors have
// occurred.
//
// bigger for slower attack, 1 is double, 0 is go straight to maximum
func (p *Pacer) SetAttackConstant(attack uint) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.attackConstant = attack
return p
}
// SetRetries sets the max number of tries for Call
func (p *Pacer) SetRetries(retries int) *Pacer {
p.mu.Lock()
@@ -185,7 +200,11 @@ func (p *Pacer) beginCall() {
func (p *Pacer) defaultPacer(retry bool) {
oldSleepTime := p.sleepTime
if retry {
p.sleepTime *= 2
if p.attackConstant == 0 {
p.sleepTime = p.maxSleep
} else {
p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
}
if p.sleepTime > p.maxSleep {
p.sleepTime = p.maxSleep
}
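To make the attack/decay arithmetic concrete, here is a standalone sketch of the two adjustments; the shift formulas mirror defaultPacer above and agree with the test expectations later in this diff (min/max values chosen for illustration):

package main

import (
	"fmt"
	"time"
)

const (
	minSleep = time.Microsecond
	maxSleep = time.Second
)

// attack grows the sleep time after a retry by 2^a/(2^a-1):
// a=1 doubles, a=2 multiplies by 4/3, a=0 jumps straight to the maximum.
func attack(sleep time.Duration, a uint) time.Duration {
	if a == 0 {
		return maxSleep
	}
	sleep = (sleep << a) / ((1 << a) - 1)
	if sleep > maxSleep {
		sleep = maxSleep
	}
	return sleep
}

// decay shrinks the sleep time after a success by subtracting sleep/2^d:
// d=1 halves, d=2 multiplies by 3/4, d=0 drops straight to the minimum.
func decay(sleep time.Duration, d uint) time.Duration {
	if d == 0 {
		return minSleep
	}
	sleep -= sleep >> d
	if sleep < minSleep {
		sleep = minSleep
	}
	return sleep
}

func main() {
	fmt.Println(attack(time.Millisecond, 1))  // 2ms
	fmt.Println(attack(time.Millisecond, 2))  // ~1.333ms
	fmt.Println(decay(8*time.Millisecond, 1)) // 4ms
	fmt.Println(decay(time.Millisecond, 2))   // 750µs
}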
@@ -231,7 +250,7 @@ func (p *Pacer) acdPacer(retry bool) {
if p.sleepTime < p.minSleep {
p.sleepTime = p.minSleep
}
fs.Debug("pacer", "Rate limited, sleeping for %v (%d retries)", p.sleepTime, consecutiveRetries)
fs.Debug("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, consecutiveRetries)
}
}
@@ -256,13 +275,14 @@ func (p *Pacer) endCall(retry bool) {
// call implements Call but with settable retries
func (p *Pacer) call(fn Paced, retries int) (err error) {
var retry bool
for i := 0; i < retries; i++ {
for i := 1; i <= retries; i++ {
p.beginCall()
retry, err = fn()
p.endCall(retry)
if !retry {
break
}
fs.Debug("pacer", "low level retry %d/%d", i, retries)
}
if retry {
err = fs.RetryError(err)


@@ -9,6 +9,8 @@ import (
)
func TestNew(t *testing.T) {
const expectedRetries = 7
fs.Config.LowLevelRetries = expectedRetries
p := New()
if p.minSleep != 10*time.Millisecond {
t.Errorf("minSleep")
@@ -19,12 +21,15 @@ func TestNew(t *testing.T) {
if p.sleepTime != p.minSleep {
t.Errorf("sleepTime")
}
if p.retries != 10 {
t.Errorf("retries")
if p.retries != expectedRetries {
t.Errorf("retries want %v got %v", expectedRetries, p.retries)
}
if p.decayConstant != 2 {
t.Errorf("decayConstant")
}
if p.attackConstant != 1 {
t.Errorf("attackConstant")
}
if cap(p.pacer) != 1 {
t.Errorf("pacer 1")
}
@@ -83,6 +88,58 @@ func TestSetDecayConstant(t *testing.T) {
}
}
func TestDecay(t *testing.T) {
p := New().SetMinSleep(time.Microsecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second)
for _, test := range []struct {
in time.Duration
attackConstant uint
want time.Duration
}{
{8 * time.Millisecond, 1, 4 * time.Millisecond},
{1 * time.Millisecond, 0, time.Microsecond},
{1 * time.Millisecond, 2, (3 * time.Millisecond) / 4},
{1 * time.Millisecond, 3, (7 * time.Millisecond) / 8},
} {
p.sleepTime = test.in
p.SetDecayConstant(test.attackConstant)
p.defaultPacer(false)
got := p.sleepTime
if got != test.want {
t.Errorf("bad sleep want %v got %v", test.want, got)
}
}
}
func TestSetAttackConstant(t *testing.T) {
p := New().SetAttackConstant(19)
if p.attackConstant != 19 {
t.Errorf("didn't set")
}
}
func TestAttack(t *testing.T) {
p := New().SetMinSleep(time.Microsecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second)
for _, test := range []struct {
in time.Duration
attackConstant uint
want time.Duration
}{
{1 * time.Millisecond, 1, 2 * time.Millisecond},
{1 * time.Millisecond, 0, time.Second},
{1 * time.Millisecond, 2, (4 * time.Millisecond) / 3},
{1 * time.Millisecond, 3, (8 * time.Millisecond) / 7},
} {
p.sleepTime = test.in
p.SetAttackConstant(test.attackConstant)
p.defaultPacer(true)
got := p.sleepTime
if got != test.want {
t.Errorf("bad sleep want %v got %v", test.want, got)
}
}
}
func TestSetRetries(t *testing.T) {
p := New().SetRetries(18)
if p.retries != 18 {

rclone.1 (new file, 4504 lines): diff suppressed because it is too large.

rclone.go:

@@ -15,22 +15,14 @@ import (
"github.com/spf13/pflag"
"github.com/ncw/rclone/fs"
// Active file systems
_ "github.com/ncw/rclone/amazonclouddrive"
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/dropbox"
_ "github.com/ncw/rclone/googlecloudstorage"
_ "github.com/ncw/rclone/hubic"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/onedrive"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
_ "github.com/ncw/rclone/fs/all" // import all fs
)
// Globals
var (
// Flags
cpuprofile = pflag.StringP("cpuprofile", "", "", "Write cpu profile to file")
cpuProfile = pflag.StringP("cpuprofile", "", "", "Write cpu profile to file")
memProfile = pflag.String("memprofile", "", "Write memory profile to file")
statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats (0 to disable)")
version = pflag.BoolP("version", "V", false, "Print the version number")
logFile = pflag.StringP("log-file", "", "", "Log everything to this file")
@@ -155,6 +147,18 @@ var Commands = []Command{
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "sha1sum",
ArgsHelp: "remote:path",
Help: `
Produces an sha1sum file for all the objects in the path. This
is in the same format as the standard sha1sum tool produces.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.Sha1sum(fdst, os.Stdout)
},
MinArgs: 1,
MaxArgs: 1,
},
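As a usage illustration of the new command (remote and file name hypothetical; the hash shown is the SHA-1 of "hello world", in the two-space format the standard sha1sum tool uses):

$ rclone sha1sum remote:backup
2aae6c35c94fcfb415dbe95f408b9ce91ee846ed  hello.txt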
{
Name: "size",
ArgsHelp: "remote:path",
@@ -202,7 +206,8 @@ var Commands = []Command{
Name: "purge",
ArgsHelp: "remote:path",
Help: `
Remove the path and all of its contents.`,
Remove the path and all of its contents. Does not obey
filters - use delete for that.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.Purge(fdst)
},
@@ -210,6 +215,18 @@ var Commands = []Command{
MaxArgs: 1,
Retry: true,
},
{
Name: "delete",
ArgsHelp: "remote:path",
Help: `
Remove the contents of path. Obeys include/exclude filters.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.Delete(fdst)
},
MinArgs: 1,
MaxArgs: 1,
Retry: true,
},
{
Name: "check",
ArgsHelp: "source:path dest:path",
@@ -223,6 +240,19 @@ var Commands = []Command{
MinArgs: 2,
MaxArgs: 2,
},
{
Name: "dedupe",
ArgsHelp: "remote:path",
Help: `
Interactively find duplicate files and offer to delete all
but one or rename them to be different. Only useful with
Google Drive which can have duplicate file names.`,
Run: func(fdst, fsrc fs.Fs) error {
return fs.Deduplicate(fdst, fs.Config.DedupeMode)
},
MinArgs: 1,
MaxArgs: 1,
},
{
Name: "config",
Help: `
@@ -233,6 +263,20 @@ var Commands = []Command{
},
NoStats: true,
},
{
Name: "authorize",
Help: `
Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.`,
Run: func(fdst, fsrc fs.Fs) error {
fs.Authorize(pflag.Args()[1:])
return nil
},
NoStats: true,
MinArgs: 1,
MaxArgs: 3,
},
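For illustration, the headless flow this command supports looks roughly like the following; the remote id is hypothetical and the token is abridged, but the prompt text matches the oauthutil changes above:

$ rclone authorize "onedrive"
Paste the following into your remote machine --->
{"access_token":"...","token_type":"Bearer","refresh_token":"...","expiry":"..."}
<---End paste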
{
Name: "help",
Help: `
@@ -277,21 +321,6 @@ func ParseFlags() {
pflag.Parse()
runtime.GOMAXPROCS(runtime.NumCPU())
fs.LoadConfig()
// Setup profiling if desired
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
err = pprof.StartCPUProfile(f)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
defer pprof.StopCPUProfile()
}
}
// ParseCommand parses the command from the command line
@@ -386,6 +415,44 @@ func main() {
redirectStderr(f)
}
// Setup CPU profiling if desired
if *cpuProfile != "" {
log.Printf("Creating CPU profile %q\n", *cpuProfile)
f, err := os.Create(*cpuProfile)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
err = pprof.StartCPUProfile(f)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
defer pprof.StopCPUProfile()
}
// Setup memory profiling if desired
if *memProfile != "" {
defer func() {
log.Printf("Saving Memory profile %q\n", *memProfile)
f, err := os.Create(*memProfile)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
err = pprof.WriteHeapProfile(f)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
err = f.Close()
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
}()
}
// Make source and destination fs
var fdst, fsrc fs.Fs
if len(args) >= 1 {
@@ -402,37 +469,37 @@ func main() {
StartStats()
}
// Run the actual command
if command.Run != nil {
var err error
for try := 1; try <= *retries; try++ {
err = command.Run(fdst, fsrc)
if !command.Retry || (err == nil && !fs.Stats.Errored()) {
break
}
if err != nil {
fs.Log(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
} else {
fs.Log(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
}
if try < *retries {
fs.Stats.ResetErrors()
}
}
if err != nil {
log.Fatalf("Failed to %s: %v", command.Name, err)
}
if !command.NoStats && (!fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0) {
fmt.Fprintln(os.Stderr, fs.Stats)
}
if fs.Config.Verbose {
fs.Debug(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
}
if fs.Stats.Errored() {
os.Exit(1)
}
os.Exit(0)
} else {
// Exit if no command to run
if command.Run == nil {
return
}
// Run the actual command
var err error
for try := 1; try <= *retries; try++ {
err = command.Run(fdst, fsrc)
if !command.Retry || (err == nil && !fs.Stats.Errored()) {
break
}
if err != nil {
fs.Log(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
} else {
fs.Log(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
}
if try < *retries {
fs.Stats.ResetErrors()
}
}
if err != nil {
log.Fatalf("Failed to %s: %v", command.Name, err)
}
if !command.NoStats && (!fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0) {
fmt.Fprintln(os.Stderr, fs.Stats)
}
if fs.Config.Verbose {
fs.Debug(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
}
if fs.Stats.Errored() {
os.Exit(1)
}
}


@@ -1,6 +1,6 @@
// Log the panic to the log file - for oses which can't do this
//+build !windows,!unix
// +build !windows,!darwin,!dragonfly,!freebsd,!linux,!nacl,!netbsd,!openbsd
package main


@@ -1,6 +1,6 @@
// Log the panic under unix to the log file
//+build unix
// +build darwin dragonfly freebsd linux nacl netbsd openbsd
package main


@@ -4,7 +4,7 @@
//
// http://play.golang.org/p/kLtct7lSUg
//+build windows
// +build windows
package main


@@ -1,30 +1,74 @@
// Package api implements the API for one drive
package api
// Package rest implements a simple REST wrapper
//
// All methods are safe for concurrent calling.
package rest
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"sync"
"github.com/ncw/rclone/fs"
)
const (
rootURL = "https://api.onedrive.com/v1.0" // root URL for requests
)
// Client contains the info to sustain the API
type Client struct {
c *http.Client
mu sync.RWMutex
c *http.Client
rootURL string
errorHandler func(resp *http.Response) error
headers map[string]string
}
// NewClient takes an oauth http.Client and makes a new api instance
func NewClient(c *http.Client) *Client {
return &Client{
c: c,
api := &Client{
c: c,
errorHandler: defaultErrorHandler,
headers: make(map[string]string),
}
api.SetHeader("User-Agent", fs.UserAgent)
return api
}
// defaultErrorHandler doesn't attempt to parse the http body, just
// returns it in the error message
func defaultErrorHandler(resp *http.Response) (err error) {
defer fs.CheckClose(resp.Body, &err)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
}
// SetErrorHandler sets the handler to decode an error response when
// the HTTP status code is not 2xx. The handler should close resp.Body.
func (api *Client) SetErrorHandler(fn func(resp *http.Response) error) *Client {
api.mu.Lock()
defer api.mu.Unlock()
api.errorHandler = fn
return api
}
// SetRoot sets the default root URL
func (api *Client) SetRoot(RootURL string) *Client {
api.mu.Lock()
defer api.mu.Unlock()
api.rootURL = RootURL
return api
}
// SetHeader sets a header for all requests
func (api *Client) SetHeader(key, value string) *Client {
api.mu.Lock()
defer api.mu.Unlock()
api.headers[key] = value
return api
}
// Opts contains parameters for Call, CallJSON etc
@@ -38,20 +82,13 @@ type Opts struct {
ContentLength *int64
ContentRange string
ExtraHeaders map[string]string
}
// checkClose is a utility function used to check the return from
// Close in a defer statement.
func checkClose(c io.Closer, err *error) {
cerr := c.Close()
if *err == nil {
*err = cerr
}
UserName string // username for Basic Auth
Password string // password for Basic Auth
}
// DecodeJSON decodes resp.Body into result
func DecodeJSON(resp *http.Response, result interface{}) (err error) {
defer checkClose(resp.Body, &err)
defer fs.CheckClose(resp.Body, &err)
decoder := json.NewDecoder(resp.Body)
return decoder.Decode(result)
}
@@ -62,6 +99,8 @@ func DecodeJSON(resp *http.Response, result interface{}) (err error) {
//
// it will return resp if at all possible, even if err is set
func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
api.mu.RLock()
defer api.mu.RUnlock()
if opts == nil {
return nil, fmt.Errorf("call() called with nil opts")
}
@@ -69,42 +108,52 @@ func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
if opts.Absolute {
url = opts.Path
} else {
url = rootURL + opts.Path
if api.rootURL == "" {
return nil, fmt.Errorf("RootURL not set")
}
url = api.rootURL + opts.Path
}
req, err := http.NewRequest(opts.Method, url, opts.Body)
if err != nil {
return
}
headers := make(map[string]string)
// Set default headers
for k, v := range api.headers {
headers[k] = v
}
if opts.ContentType != "" {
req.Header.Add("Content-Type", opts.ContentType)
headers["Content-Type"] = opts.ContentType
}
if opts.ContentLength != nil {
req.ContentLength = *opts.ContentLength
}
if opts.ContentRange != "" {
req.Header.Add("Content-Range", opts.ContentRange)
headers["Content-Range"] = opts.ContentRange
}
// Set any extra headers
if opts.ExtraHeaders != nil {
for k, v := range opts.ExtraHeaders {
headers[k] = v
}
}
// Now set the headers
for k, v := range headers {
if v != "" {
req.Header.Add(k, v)
}
}
req.Header.Add("User-Agent", fs.UserAgent)
if opts.UserName != "" || opts.Password != "" {
req.SetBasicAuth(opts.UserName, opts.Password)
}
api.mu.RUnlock()
resp, err = api.c.Do(req)
api.mu.RLock()
if err != nil {
return nil, err
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
// Decode error response
errResponse := new(Error)
err = DecodeJSON(resp, &errResponse)
if err != nil {
return resp, err
}
if errResponse.ErrorInfo.Code == "" {
errResponse.ErrorInfo.Code = resp.Status
}
return resp, errResponse
return resp, api.errorHandler(resp)
}
if opts.NoResponse {
return resp, resp.Body.Close()
@@ -133,7 +182,7 @@ func (api *Client) CallJSON(opts *Opts, request interface{}, response interface{
if err != nil {
return resp, err
}
if opts.NoResponse {
if response == nil || opts.NoResponse {
return resp, nil
}
err = DecodeJSON(resp, response)
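A hypothetical usage sketch of the rest.Client shown above — the endpoint, header and result type are invented for illustration, while the constructor, Opts fields and CallJSON signature are as in this diff:

package main

import (
	"log"
	"net/http"

	"github.com/ncw/rclone/rest"
)

type result struct {
	Name string `json:"name"`
}

func main() {
	c := rest.NewClient(http.DefaultClient).
		SetRoot("https://api.example.com/v1").
		SetHeader("X-Custom", "value")
	opts := rest.Opts{
		Method: "GET",
		Path:   "/items/123",
	}
	var r result
	// nil request body, as the onedrive backend does for GETs
	_, err := c.CallJSON(&opts, nil, &r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %q", r.Name)
}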

s3/s3.go:

@@ -17,6 +17,8 @@ import (
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"regexp"
"strings"
@@ -26,6 +28,8 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
@@ -36,16 +40,29 @@ import (
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "s3",
NewFs: NewFs,
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 (also Dreamhost, Ceph)",
NewFs: NewFs,
// AWS endpoints: http://docs.amazonwebservices.com/general/latest/gr/rande.html#s3_region
Options: []fs.Option{{
Name: "env_auth",
Help: "Get AWS credentials from runtime (environment variables or EC2 meta data if no env vars). Only applies if access_key_id and secret_access_key is blank.",
Examples: []fs.OptionExample{
{
Value: "false",
Help: "Enter AWS credentials in the next step",
}, {
Value: "true",
Help: "Get AWS credentials from the environment (env vars or IAM)",
},
},
}, {
Name: "access_key_id",
Help: "AWS Access Key ID - leave blank for anonymous access.",
Help: "AWS Access Key ID - leave blank for anonymous access or runtime credentials.",
}, {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password) - leave blank for anonymous access.",
Help: "AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
}, {
Name: "region",
Help: "Region to connect to.",
@@ -78,10 +95,10 @@ func init() {
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}, {
Value: "other-v2-signature",
Help: "If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.",
Help: "If using an S3 clone that only understands v2 signatures\neg Ceph/Dreamhost\nset this and make sure you set the endpoint.",
}, {
Value: "other-v4-signature",
Help: "If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.",
Help: "If using an S3 clone that understands v4 signatures set this\nand make sure you set the endpoint.",
}},
}, {
Name: "endpoint",
@@ -194,19 +211,40 @@ func s3ParsePath(path string) (bucket, directory string, err error) {
// s3Connection makes a connection to s3
func s3Connection(name string) (*s3.S3, *session.Session, error) {
// Make the auth
accessKeyID := fs.ConfigFile.MustValue(name, "access_key_id")
secretAccessKey := fs.ConfigFile.MustValue(name, "secret_access_key")
var auth *credentials.Credentials
v := credentials.Value{
AccessKeyID: fs.ConfigFile.MustValue(name, "access_key_id"),
SecretAccessKey: fs.ConfigFile.MustValue(name, "secret_access_key"),
}
// first provider to supply a credential set "wins"
providers := []credentials.Provider{
// use static credentials if they're present (checked by provider)
&credentials.StaticProvider{Value: v},
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
&credentials.EnvProvider{},
// Pick up IAM role in case we're on EC2
&ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.New(session.New(), &aws.Config{
HTTPClient: &http.Client{Timeout: 1 * time.Second}, // low timeout to ec2 metadata service
}),
ExpiryWindow: 3,
},
}
cred := credentials.NewChainCredentials(providers)
switch {
case accessKeyID == "" && secretAccessKey == "":
fs.Debug(name, "Using anonymous access for S3")
auth = credentials.AnonymousCredentials
case accessKeyID == "":
case fs.ConfigFile.MustBool(name, "env_auth", false):
// No need for empty checks if "env_auth" is true
case v.AccessKeyID == "" && v.SecretAccessKey == "":
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
cred = credentials.AnonymousCredentials
case v.AccessKeyID == "":
return nil, nil, errors.New("access_key_id not found")
case secretAccessKey == "":
case v.SecretAccessKey == "":
return nil, nil, errors.New("secret_access_key not found")
default:
auth = credentials.NewStaticCredentials(accessKeyID, secretAccessKey, "")
}
endpoint := fs.ConfigFile.MustValue(name, "endpoint")
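For illustration, a remote that relies on runtime credentials (environment variables, or an EC2 instance role via the provider chain above) might be configured like this in rclone.conf — the remote name is hypothetical:

[mys3]
type = s3
env_auth = true
access_key_id =
secret_access_key =
region = us-east-1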
@@ -220,7 +258,7 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
awsConfig := aws.NewConfig().
WithRegion(region).
WithMaxRetries(maxRetries).
WithCredentials(auth).
WithCredentials(cred).
WithEndpoint(endpoint).
WithHTTPClient(fs.Config.Client()).
WithS3ForcePathStyle(true)
@@ -234,7 +272,7 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
sign(accessKeyID, secretAccessKey, req.HTTPRequest)
sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
}
c.Handlers.Sign.Clear()
c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
@@ -458,17 +496,38 @@ func (f *Fs) ListDir() fs.DirChan {
}
// Put the FsObject into the bucket
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: remote,
remote: src.Remote(),
}
return fs, fs.Update(in, modTime, size)
return fs, fs.Update(in, src)
}
// Check if the bucket exists
func (f *Fs) dirExists() (bool, error) {
req := s3.HeadBucketInput{
Bucket: &f.bucket,
}
_, err := f.c.HeadBucket(&req)
if err == nil {
return true, nil
}
if err, ok := err.(awserr.RequestFailure); ok {
if err.StatusCode() == http.StatusNotFound {
return false, nil
}
}
return false, err
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir() error {
exists, err := f.dirExists()
if err != nil || exists {
return err
}
req := s3.CreateBucketInput{
Bucket: &f.bucket,
ACL: &f.perm,
@@ -478,7 +537,7 @@ func (f *Fs) Mkdir() error {
LocationConstraint: &f.locationConstraint,
}
}
_, err := f.c.CreateBucket(&req)
_, err = f.c.CreateBucket(&req)
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
return nil
@@ -523,7 +582,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
srcFs := srcObj.fs
key := f.root + remote
source := srcFs.bucket + "/" + srcFs.root + srcObj.remote
source := url.QueryEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
req := s3.CopyObjectInput{
Bucket: &f.bucket,
Key: &key,
@@ -537,10 +596,15 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
return f.NewFsObject(remote), err
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
func (o *Object) Fs() fs.Info {
return o.fs
}
@@ -559,8 +623,11 @@ func (o *Object) Remote() string {
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
}
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
@@ -635,15 +702,16 @@ func (o *Object) ModTime() time.Time {
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) error {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to read metadata: %s", err)
return
return err
}
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
// Guess the content type
contentType := fs.MimeType(o)
// Copy the object to itself to update the metadata
key := o.fs.root + o.remote
sourceKey := o.fs.bucket + "/" + key
@@ -652,15 +720,13 @@ func (o *Object) SetModTime(modTime time.Time) {
Bucket: &o.fs.bucket,
ACL: &o.fs.perm,
Key: &key,
CopySource: &sourceKey,
ContentType: &contentType,
CopySource: aws.String(url.QueryEscape(sourceKey)),
Metadata: o.meta,
MetadataDirective: &directive,
}
_, err = o.fs.c.CopyObject(&req)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
}
return err
}
// Storable returns a boolean indicating if this object is storable
@@ -683,11 +749,21 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
}
// Update the Object from in with modTime and size
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
modTime := src.ModTime()
uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
u.Concurrency = 2
u.LeavePartsOnError = false
u.S3 = o.fs.c
u.PartSize = s3manager.MinUploadPartSize
size := src.Size()
// Adjust PartSize until the number of parts is small enough.
if size/u.PartSize >= s3manager.MaxUploadParts {
// Calculate partition size rounded up to the nearest MB
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
}
})
// Set the mtime in the meta data
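To see the part-size adjustment above in action, a worked sketch follows, assuming the AWS SDK defaults of MaxUploadParts = 10000 and a 5 MiB MinUploadPartSize:

package main

import "fmt"

func main() {
	const maxUploadParts = 10000
	size := int64(100) << 30   // 100 GiB upload
	partSize := int64(5) << 20 // 5 MiB minimum part size
	if size/partSize >= maxUploadParts {
		// Round (size / maxUploadParts) up to the next whole MiB,
		// exactly as the Update code above does.
		partSize = (((size / maxUploadParts) >> 20) + 1) << 20
	}
	fmt.Println(partSize>>20, "MiB per part") // 11 MiB -> about 9310 parts
}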


@@ -42,7 +42,7 @@ func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }


@@ -14,25 +14,25 @@ import (
// URL parameters that need to be added to the signature
var s3ParamsToSign = map[string]struct{}{
"acl": struct{}{},
"location": struct{}{},
"logging": struct{}{},
"notification": struct{}{},
"partNumber": struct{}{},
"policy": struct{}{},
"requestPayment": struct{}{},
"torrent": struct{}{},
"uploadId": struct{}{},
"uploads": struct{}{},
"versionId": struct{}{},
"versioning": struct{}{},
"versions": struct{}{},
"response-content-type": struct{}{},
"response-content-language": struct{}{},
"response-expires": struct{}{},
"response-cache-control": struct{}{},
"response-content-disposition": struct{}{},
"response-content-encoding": struct{}{},
"acl": {},
"location": {},
"logging": {},
"notification": {},
"partNumber": {},
"policy": {},
"requestPayment": {},
"torrent": {},
"uploadId": {},
"uploads": {},
"versionId": {},
"versioning": {},
"versions": {},
"response-content-type": {},
"response-content-language": {},
"response-expires": {},
"response-cache-control": {},
"response-content-disposition": {},
"response-content-encoding": {},
}
// sign signs requests using v2 auth

swift/auth.go (new file):

@@ -0,0 +1,30 @@
package swift
import "github.com/ncw/swift"
// auth is an authenticator for swift
type auth struct {
swift.Authenticator
storageURL string
}
// newAuth creates a swift authenticator wrapper to override the
// StorageUrl method.
func newAuth(Authenticator swift.Authenticator, storageURL string) *auth {
return &auth{
Authenticator: Authenticator,
storageURL: storageURL,
}
}
// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string {
if a.storageURL != "" {
return a.storageURL
}
return a.Authenticator.StorageUrl(Internal)
}
// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)


@@ -29,9 +29,10 @@ var (
// Register with Fs
func init() {
fs.Register(&fs.Info{
Name: "swift",
NewFs: NewFs,
fs.Register(&fs.RegInfo{
Name: "swift",
Description: "Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
NewFs: NewFs,
Options: []fs.Option{{
Name: "user",
Help: "User name to log in.",
@@ -56,6 +57,9 @@ func init() {
}, {
Help: "Memset Memstore UK v2",
Value: "https://auth.storage.memset.com/v2.0",
}, {
Help: "OVH",
Value: "https://auth.cloud.ovh.net/v2.0",
}},
}, {
Name: "tenant",
@@ -63,6 +67,9 @@ func init() {
}, {
Name: "region",
Help: "Region name - optional",
}, {
Name: "storage_url",
Help: "Storage URL - optional",
},
},
})
@@ -72,11 +79,11 @@ func init() {
// Fs represents a remote swift server
type Fs struct {
name string // name of this remote
c swift.Connection // the connection to the swift server
container string // the container we are working on
segmentsContainer string // container to store the segments (if any) in
root string // the path we are working on if any
name string // name of this remote
c *swift.Connection // the connection to the swift server
container string // the container we are working on
segmentsContainer string // container to store the segments (if any) in
root string // the path we are working on if any
}
// Object describes a swift object
@@ -168,11 +175,17 @@ func NewFsWithConnection(name, root string, c *swift.Connection) (fs.Fs, error)
}
f := &Fs{
name: name,
c: *c,
c: c,
container: container,
segmentsContainer: container + "_segments",
root: directory,
}
// StorageURL overloading
storageURL := fs.ConfigFile.MustValue(name, "storage_url")
if storageURL != "" {
f.c.StorageUrl = storageURL
f.c.Auth = newAuth(f.c.Auth, storageURL)
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists - ignoring directory markers
@@ -210,6 +223,12 @@ func (f *Fs) newFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
fs: f,
remote: remote,
}
// Note that due to a quirk of swift, dynamic large objects are
// returned as 0 bytes in the listing. Correct this here by
// making sure we read the full metadata for all 0 byte files.
if info != nil && info.Bytes == 0 {
info = nil
}
if info != nil {
// Set info but not headers
o.info = *info
@@ -301,7 +320,7 @@ func (f *Fs) listFiles(ignoreStorable bool) fs.ObjectsChan {
defer close(out)
f.list(false, func(remote string, object *swift.Object) error {
if o := f.newFsObjectWithInfo(remote, object); o != nil {
// Storable does a full metadata read on 0 size objects which might be manifest files
// Storable does a full metadata read on 0 size objects which might be dynamic large objects
storable := o.Storable()
if storable || ignoreStorable {
out <- o
@@ -362,18 +381,27 @@ func (f *Fs) ListDir() fs.DirChan {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: remote,
remote: src.Remote(),
}
return fs, fs.Update(in, modTime, size)
return fs, fs.Update(in, src)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir() error {
return f.c.ContainerCreate(f.container, nil)
// Check to see if container exists first
_, _, err := f.c.Container(f.container)
if err == nil {
return nil
}
if err == swift.ContainerNotFound {
return f.c.ContainerCreate(f.container, nil)
}
return err
}
// Rmdir deletes the container if the fs is at the root
@@ -422,10 +450,15 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
return f.NewFsObject(remote), nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
func (o *Object) Fs() fs.Info {
return o.fs
}
@@ -442,21 +475,29 @@ func (o *Object) Remote() string {
return o.remote
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
isManifest, err := o.isManifestFile()
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
}
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return "", err
}
if isManifest {
fs.Debug(o, "Returning empty Md5sum for swift manifest file")
isStaticLargeObject, err := o.isStaticLargeObject()
if err != nil {
return "", err
}
if isDynamicLargeObject || isStaticLargeObject {
fs.Debug(o, "Returning empty Md5sum for swift large object")
return "", nil
}
return strings.ToLower(o.info.Hash), nil
}
// isManifestFile checks for manifest header
func (o *Object) isManifestFile() (bool, error) {
// hasHeader checks for the header passed in returning false if the
// object isn't found.
func (o *Object) hasHeader(header string) (bool, error) {
err := o.readMetaData()
if err != nil {
if err == swift.ObjectNotFound {
@@ -464,8 +505,18 @@ func (o *Object) isManifestFile() (bool, error) {
}
return false, err
}
_, isManifestFile := (*o.headers)["X-Object-Manifest"]
return isManifestFile, nil
_, isDynamicLargeObject := (*o.headers)[header]
return isDynamicLargeObject, nil
}
// isDynamicLargeObject checks for X-Object-Manifest header
func (o *Object) isDynamicLargeObject() (bool, error) {
return o.hasHeader("X-Object-Manifest")
}
// isStaticLargeObject checks for the X-Static-Large-Object header
func (o *Object) isStaticLargeObject() (bool, error) {
return o.hasHeader("X-Static-Large-Object")
}
// Size returns the size of an object in bytes
@@ -509,12 +560,10 @@ func (o *Object) ModTime() time.Time {
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
func (o *Object) SetModTime(modTime time.Time) error {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to read metadata: %s", err)
return
return err
}
meta := o.headers.ObjectMetadata()
meta.SetModTime(modTime)
@@ -528,18 +577,13 @@ func (o *Object) SetModTime(modTime time.Time) {
newHeaders[k] = v
}
}
err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
}
return o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
}
// Storable returns if this object is storable
//
// It reads the metadata for <= directoryMarkerMaxSize byte objects then compares the
// Content-Type to directoryMarkerContentType - that makes it a
// directory marker which is not storable.
// It compares the Content-Type to directoryMarkerContentType - that
// makes it a directory marker which is not storable.
func (o *Object) Storable() bool {
return o.info.ContentType != directoryMarkerContentType
}
@@ -583,6 +627,23 @@ func (o *Object) removeSegments(except string) error {
return nil
}
// urlEncode encodes a string so that it is a valid URL
//
// We don't use any of Go's standard methods as we need `/` not
// encoded but we need '&' encoded.
func urlEncode(str string) string {
var buf bytes.Buffer
for i := 0; i < len(str); i++ {
c := str[i]
if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' {
_ = buf.WriteByte(c)
} else {
_, _ = buf.WriteString(fmt.Sprintf("%%%02X", c))
}
}
return buf.String()
}
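A quick illustration of why a custom encoder is needed here: url.QueryEscape encodes the '/' that the manifest value must keep literal. Expected outputs are shown as comments; the custom urlEncode output is inferred from the loop above.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	s := "segments/path/to/file name&more"
	// QueryEscape encodes '/' as %2F and ' ' as '+', breaking the manifest path:
	fmt.Println(url.QueryEscape(s)) // segments%2Fpath%2Fto%2Ffile+name%26more
	// urlEncode above keeps '/' and '.' literal, giving:
	//   segments/path/to/file%20name%26more
}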
// updateChunks updates the existing object using chunks to a separate
// container. It returns a string which prefixes current segments.
func (o *Object) updateChunks(in io.Reader, headers swift.Headers, size int64) (string, error) {
@@ -610,7 +671,7 @@ func (o *Object) updateChunks(in io.Reader, headers swift.Headers, size int64) (
i++
}
// Upload the manifest
headers["X-Object-Manifest"] = fmt.Sprintf("%s/%s", o.fs.segmentsContainer, segmentsPath)
headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", o.fs.segmentsContainer, segmentsPath))
headers["Content-Length"] = "0" // set Content-Length as we know it
emptyReader := bytes.NewReader(nil)
manifestName := o.fs.root + o.remote
@@ -621,9 +682,12 @@ func (o *Object) updateChunks(in io.Reader, headers swift.Headers, size int64) (
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
// Note whether this has a manifest before starting
isManifest, err := o.isManifestFile()
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
size := src.Size()
modTime := src.ModTime()
// Note whether this is a dynamic large object before starting
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return err
}
@@ -646,8 +710,8 @@ func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
}
}
// If file was a manifest then remove old/all segments
if isManifest {
// If file was a dynamic large object then remove old/all segments
if isDynamicLargeObject {
err = o.removeSegments(uniquePrefix)
if err != nil {
fs.Log(o, "Failed to remove old segments - carrying on with upload: %v", err)
@@ -661,7 +725,7 @@ func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
// Remove an object
func (o *Object) Remove() error {
isManifestFile, err := o.isManifestFile()
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return err
}
@@ -671,7 +735,7 @@ func (o *Object) Remove() error {
return err
}
// ...then segments if required
if isManifestFile {
if isDynamicLargeObject {
err = o.removeSegments("")
if err != nil {
return err

Some files were not shown because too many files have changed in this diff.