mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

1246 Commits

Author SHA1 Message Date
Nick Craig-Wood
87d64e7fb4 mount: use the equivalent of kernel_cache by default #FIXME WIP 2018-07-11 14:56:17 +01:00
Nick Craig-Wood
793f594b07 gcs: fix index out of range error with --fast-list fixes #2388 2018-07-09 17:00:52 +01:00
Nick Craig-Wood
4fe6614ae1 s3: fix index out of range error with --fast-list fixes #2388 2018-07-09 17:00:52 +01:00
Nick Craig-Wood
4c2fbf9b36 Add Jasper Lievisse Adriaanse to contributors 2018-07-08 11:01:56 +01:00
Jasper Lievisse Adriaanse
ed4f1b2936 sftp: fix typo in help text 2018-07-08 11:01:35 +01:00
Nick Craig-Wood
144c1a04d4 fs: Fix parsing of paths under Windows - fixes #2353
Before this change copyto would parse Windows paths incorrectly.

This change moves the parsing code into fspath and makes sure
fspath.Split calls fspath.Parse, which does the parsing correctly.

This also renames fspath.RemoteParse to fspath.Parse for consistency.
2018-07-06 23:16:43 +01:00
Nick Craig-Wood
25ec7f5c00 Add Onno Zweers to contributors 2018-07-05 10:05:24 +01:00
Onno Zweers
b15603d5ea webdav: document dCache and Macaroons 2018-07-05 10:04:57 +01:00
Nick Craig-Wood
71c974bf9a azureblob: documentation for authentication methods 2018-07-05 09:39:06 +01:00
Nick Craig-Wood
03c5b8232e Update github.com/Azure/azure-sdk-for-go #2118
This pulls in https://github.com/Azure/azure-sdk-for-go/issues/2119
which fixes the SAS URL support.
2018-07-04 09:25:13 +01:00
Nick Craig-Wood
72392a2d72 azureblob: list the container to see if it exists #2118
This means that SAS URLs which are tied to a single container will work.
2018-07-04 09:23:00 +01:00
Nick Craig-Wood
b062ae9d13 azureblob: add connection string and SAS URL auth - fixes #2118 2018-07-04 09:22:59 +01:00
Nick Craig-Wood
8c0335a176 build: fix for goimports format change
See https://github.com/golang/go/issues/23709
2018-07-03 22:33:15 +01:00
Nick Craig-Wood
794e55de27 mega: wait for events instead of arbitrary sleeping 2018-07-02 14:50:09 +01:00
Nick Craig-Wood
038ed1aaf0 vendor: update github.com/t3rm1n4l/go-mega - fixes #2366
This update fixes files being missing from mega directory listings.
2018-07-02 14:50:09 +01:00
Nick Craig-Wood
97beff5370 build: keep track of compile failures better in cross-compile 2018-07-02 10:09:18 +01:00
Nick Craig-Wood
b9b9bce0db ftp: fix Put mkParentDir failed: 521 for BunnyCDN - fixes #2363
According to RFC 959, error 521 is the correct error return to mean
"dir already exists", so add support for this.
2018-06-30 14:29:47 +01:00
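
A minimal sketch of the idea in Go, assuming the server error surfaces
as a *textproto.Error as it does with common Go FTP clients
(mkdirExistOK is an illustrative helper, not rclone's actual code):

    import (
        "errors"
        "net/textproto"
    )

    // mkdirExistOK treats FTP code 521 ("dir already exists") as
    // success, so creating a parent directory that is already there
    // no longer fails the transfer.
    func mkdirExistOK(mkdir func(path string) error, path string) error {
        err := mkdir(path)
        var tpErr *textproto.Error
        if errors.As(err, &tpErr) && tpErr.Code == 521 {
            return nil // directory already exists - not an error
        }
        return err
    }
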
Nick Craig-Wood
947e10eb2b config: fix error reading password from piped input - fixes #1308 2018-06-28 11:54:15 +01:00
Nick Craig-Wood
6b42421374 build: build macOS beta releases with native compiler on travis #2309 2018-06-26 09:39:44 +01:00
Nick Craig-Wood
fa051ff970 webdav: add bearer token (Macaroon) support for dCache - fixes #2360 2018-06-25 17:54:36 +01:00
Nick Craig-Wood
69164b3dda build: move non master beta builds into branch subdirectory 2018-06-25 16:49:04 +01:00
Nick Craig-Wood
935533e57f filter: raise --include and --exclude warning to ERROR so it appears without -v 2018-06-22 22:18:55 +01:00
Nick Craig-Wood
1550f70865 webdav: Don't accept redirects when reading metadata #2350
Go can't redirect PROPFIND requests properly: it changes the method to
GET, so we disable redirects when reading the metadata and assume the
object does not exist if we receive a redirect.

This works around QNAP servers redirecting requests for directories
without a trailing /.
2018-06-18 12:22:13 +01:00
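
A minimal sketch of how redirects can be disabled with the standard
library, assuming plain net/http (illustrative, not the rest package's
actual code):

    import "net/http"

    // newNoRedirectClient returns a client that surfaces 3xx responses
    // instead of following them, so a redirected PROPFIND can be
    // treated as "object does not exist" by the caller.
    func newNoRedirectClient() *http.Client {
        return &http.Client{
            CheckRedirect: func(req *http.Request, via []*http.Request) error {
                // ErrUseLastResponse tells the client to return the
                // most recent response without following the redirect.
                return http.ErrUseLastResponse
            },
        }
    }
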
Nick Craig-Wood
1a65c3a740 rest: add NoRedirect flag to Options 2018-06-18 12:21:50 +01:00
Nick Craig-Wood
a29a1de43d webdav: if root ends with / then don't check if it is a file 2018-06-18 12:13:47 +01:00
Nick Craig-Wood
e7ae5e8ee0 webdav: ensure we call MKCOL with a URL with a trailing / #2350
This is an attempt to fix rclone and qnap interop.
2018-06-18 11:16:58 +01:00
Mateusz
56e1e82005 fs: added weekday schedule into --bwlimit - fixes #1822 2018-06-17 18:38:09 +01:00
lewapm
8442498693 backend/drive: add flag for keep revision forever - fixes #1525 2018-06-17 18:34:35 +01:00
Nick Craig-Wood
08021c4636 vendor: update all dependencies 2018-06-17 17:59:12 +01:00
Nick Craig-Wood
3f0789e2db deletefile: fix typo in docs 2018-06-17 16:58:37 +01:00
Nick Craig-Wood
7110349547 Start v1.42-DEV development 2018-06-16 21:25:58 +01:00
Nick Craig-Wood
a9adb43896 Version v1.42 2018-06-16 18:21:09 +01:00
Nick Craig-Wood
c47a4c9703 opendrive: re-read hash when updating objects
Previously this was reading a stale hash from the object leading to
broken integration tests.

This fixes these integration tests TestSyncDoesntUpdateModtime,
TestSyncAfterChangingFilesSizeOnly, TestSyncAfterChangingContentsOnly,
TestSyncWithUpdateOlder, TestSyncUTFNorm.
2018-06-15 14:50:17 +01:00
Nick Craig-Wood
d9d00a7dd7 rcat: remove --checksum flag from the docs as it is not usually effective 2018-06-14 16:15:54 +01:00
Nick Craig-Wood
b82e66daaa Add themylogin to contributors 2018-06-14 16:15:53 +01:00
themylogin
7d2861ead6 Adjust S3 upload concurrency with --s3-upload-concurrency 2018-06-14 16:15:17 +01:00
remusb
aaa8591661 cache: add non cached dirs on notifications - #2155 2018-06-13 23:57:26 +03:00
remusb
4df1794932 cache: fix panic when running without plex configs 2018-06-13 15:06:14 +03:00
Nick Craig-Wood
d18928962c dropbox: make dropbox for business folders accessible #2003
Paths prefixed with / on a Dropbox for Business plan will now start at
the root instead of the user's home directory.
2018-06-13 11:03:34 +01:00
remusb
339fbf0df5 cache: reconnect plex websocket on failures 2018-06-12 22:58:15 +03:00
remusb
13ccb39819 cache: allow root to be expired from rc - #2237 2018-06-12 22:19:03 +03:00
remusb
f9a1a7e700 cache: fix root folder caching 2018-06-10 21:54:20 +03:00
Nick Craig-Wood
1c75581959 sync: fix TestCopyRedownload after ModifyWindow changes #2310 2018-06-10 17:34:00 +01:00
Nick Craig-Wood
4d793b8ee8 drive: remove part of workaround for #1675
Now that https://issuetracker.google.com/issues/64468406 has been
fixed, we can remove part of the workaround which fixed #1675 -
019adc35609c2136

This will make queries marginally more efficient.  We still need the
other part of the workaround since the `=` operator is case
insensitive.
2018-06-10 15:28:32 +01:00
Nick Craig-Wood
9289aead9b drive: Add --drive-acknowledge-abuse to download flagged files - fixes #2317
Also, if rclone gets the cannotDownloadAbusiveFile error, it suggests
using the --drive-acknowledge-abuse flag.
2018-06-10 15:25:21 +01:00
Filip Bartodziej
ce109ed9c0 log: password prompt output fixed for unix - partially fixes #2220 2018-06-10 12:57:45 +01:00
Filip Bartodziej
d7ac4ca44e cmd: deletefile command - fixes #2286 2018-06-10 12:49:33 +01:00
Nick Craig-Wood
1053d7e123 local: fix symlink/junction point directory handling under Windows
Before this commit rclone's handling of symlinks and junction points
under Windows was broken.  rclone treated them as files and attempted
to transfer them which gave the error "The handle is invalid".

Ultimately the cause of this was 3e43ff7414 which was a
workaround so files with reparse points (which are a kind of symlink)
would transfer correctly.

The solution implemented is to revert the above commit which will mean
that #614 will break again.  However there is now a work-around (which
will be signaled by rclone) to use the -L flag which wasn't available
when the original commit was made.

Fixes #2336
2018-06-10 12:25:03 +01:00
Nick Craig-Wood
017297af70 s3: Fix --s3-chunk-size which was always using the minimum - fixes #2345 2018-06-10 12:22:30 +01:00
remusb
4e8e5fed7d cache: clean remaining empty folders from temp upload path 2018-06-09 14:52:31 +03:00
remusb
c0f772bc14 cache: update internal tests and small fixes 2018-06-08 23:34:38 +03:00
Nick Craig-Wood
334ef28012 Add Benjamin Joseph Dag to contributors 2018-06-08 16:12:57 +01:00
Benjamin Joseph Dag
da45dadfe9 cmd: added --retries-sleep flag
The --retries-sleep flag can be used to sleep after each retry.
2018-06-08 16:12:24 +01:00
Nick Craig-Wood
05edb5f501 drive: Fix change list polling with team drives - fixes #2330
In the drive v3 conversion we forgot the IncludeTeamDriveItems
parameter when calling the changes API.  Adding it fixes the changes
polling with team drives.
2018-06-07 11:35:55 +01:00
Henning Surmeier
04d18d2a07 oauthutil: Use go template for web response
Every response is formed using the AuthResponseData struct together with
the AuthResponse HTML template.
2018-06-06 09:54:21 +01:00
Henning Surmeier
f1269dc06a onedrive: errorHandler for business requests
This implementation can hopefully handle all error responses from the
OneDrive for Business authentication.
I have only tested it with the "domain in unmanaged state" error.
2018-06-06 09:54:21 +01:00
Henning Surmeier
c5286ee157 oauthutil: support backend-specific errorHandler
This allows the backend to pass an errorHandler function to the doConfig
function. The webserver will pass the current request as a parameter to
the function.
The function can then examine all parameters and build the AuthError
struct which contains the name, code, and description of the error. A
link to the docs can be added to the HelpURL field.
oauthutil then takes care of formatting for the HTML response page. The
error details are also returned as an error in the server.err channel
and will be logged to the commandline.
2018-06-06 09:54:21 +01:00
Nick Craig-Wood
ba43acb6aa sync: fix TestCopyEmptyDirectories after ModifyWindow changes #2310 2018-06-04 21:41:25 +01:00
remusb
8a84975993 cache: cache lists using batch writes 2018-06-04 21:04:45 +03:00
ishuah
d758e1908e copy: create (pseudo copy) empty source directories to destination - fixes #1837 2018-06-04 11:01:14 +01:00
ishuah
737aed8412 Ensure items in srcEmptyDirs are actually empty 2018-06-04 11:01:14 +01:00
Stefan
4009fb67c8 fs: calculate ModifyWindow each time on the fly instead of relying on global state - see #2319, #2328 2018-06-03 20:45:34 +02:00
Nick Craig-Wood
3ef938ebde lsf: add --absolute flag to add a leading / onto path names 2018-06-03 10:42:34 +01:00
Nick Craig-Wood
5302e5f9b1 docs: add a note about SIGINFO for macOS 2018-06-02 17:38:05 +01:00
kubatasiemski
de8c7d8e45 cmd: add siginfo handler 2018-06-02 17:35:13 +01:00
Henning Surmeier
2a29f7f6c8 onedrive: Add troubleshooting to docs 2018-06-02 17:10:58 +01:00
Nick Craig-Wood
2b332bced2 Add Kasper Byrdal Nielsen to contributors 2018-05-31 09:42:36 +01:00
Kasper Byrdal Nielsen
aad75e6720 check: Add one-way argument
The --one-way argument will check that all files on the source match
the files on the destination, but not the other way around. For
example, files present on the destination but not on the source will
not trigger an error.

Fixes: #1526
2018-05-31 09:42:16 +01:00
Stefan
2a806a8d8b mount: only print "File.rename error" if there actually is an error - see #2130 (#2322) 2018-05-29 19:19:17 +02:00
Nick Craig-Wood
500085d244 vendor: update github.com/dropbox/dropbox-sdk-go-unofficial #2158 2018-05-29 15:56:40 +01:00
Nick Craig-Wood
3d8e529441 rc: return error from remote on failure 2018-05-29 10:48:01 +01:00
Stefan
6607d8752c mountlib: add testcase to ensure the ModifyWindow is calculated on Mount (see #2002) (#2319) 2018-05-28 17:49:26 +02:00
Stefan
67e9ef4547 mount: delay rename if file has open writers instead of failing outright - fixes #2130 (#2249) 2018-05-24 20:45:11 +02:00
Nick Craig-Wood
d4213c0ac5 sftp: Fix slow downloads for long latency connections - fixes #1158
This was caused by using the sftp.File.Read method which resets the
streaming window after each call.  Replacing it with sftp.File.WriteTo
and an io.Pipe fixes the problem bringing the speed to the same as the
sftp binary.
2018-05-24 15:10:28 +01:00
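
A minimal sketch of the WriteTo plus io.Pipe pattern described above,
using github.com/pkg/sftp (openForStreaming is an illustrative name):

    import (
        "io"

        "github.com/pkg/sftp"
    )

    // openForStreaming streams a remote file through File.WriteTo,
    // which keeps the streaming window open, rather than File.Read,
    // which resets the window after each call.
    func openForStreaming(f *sftp.File) io.ReadCloser {
        pr, pw := io.Pipe()
        go func() {
            _, err := f.WriteTo(pw)
            // Propagate any copy error to the reading side, then close.
            pw.CloseWithError(err)
        }()
        return pr
    }
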
Nick Craig-Wood
3a2248aa5f rc: add core/gc to run a garbage collection on demand 2018-05-24 15:10:28 +01:00
Nick Craig-Wood
573ef4c8ee rc: enable go profiling by default on the --rc port
This means you can use the pprof tool on a running rclone, eg

    go tool pprof http://localhost:5572/debug/pprof/heap
2018-05-24 15:10:28 +01:00
Nick Craig-Wood
7bf2d389a8 Add John Clayton to contributors 2018-05-22 11:48:20 +01:00
John Clayton
71b4f1ccab cache: use secure websockets for HTTPS Plex addresses 2018-05-22 11:47:57 +01:00
Nick Craig-Wood
e5ff375948 Use config.FileGet instead of fs.ConfigFileGet 2018-05-22 09:43:24 +01:00
Nick Craig-Wood
512f4b4487 Update error checking on fmt.Fprint* after errcheck update
Now we need to check or ignore errors on fmt.Fprint* explicitly -
previously errcheck just ignored them for us.
2018-05-22 09:41:13 +01:00
Nick Craig-Wood
a38f8b87ce docs: fix Nextcloud typo spotted by Eugene Mlodik 2018-05-16 16:43:52 +01:00
Nick Craig-Wood
9697754707 drive: Don't attempt to choose Team Drives when using rclone config create 2018-05-16 09:10:09 +01:00
Nick Craig-Wood
8e625e0bc3 config: add ConfirmWithDefault to change the default on AutoConfig 2018-05-16 09:09:41 +01:00
Nick Craig-Wood
e52ecba295 crypt: check the crypted hash of files when uploading #2303
This checks the checksum of the streamed encrypted data against the
checksum of the encrypted object returned from the remote and returns
an error if it is different.
2018-05-15 14:50:36 +01:00
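
A minimal sketch of the check, hashing the encrypted stream as it is
uploaded and comparing with what the remote reports (the put callback
and the hash choice are illustrative assumptions, not crypt's actual
API):

    import (
        "crypto/md5"
        "encoding/hex"
        "fmt"
        "io"
    )

    // uploadWithHashCheck hashes the encrypted data while streaming it
    // to put, then compares the local sum against the hash the remote
    // reports for the stored object.
    func uploadWithHashCheck(encrypted io.Reader, put func(io.Reader) (remoteHash string, err error)) error {
        h := md5.New()
        remoteHash, err := put(io.TeeReader(encrypted, h))
        if err != nil {
            return err
        }
        if local := hex.EncodeToString(h.Sum(nil)); remoteHash != "" && remoteHash != local {
            return fmt.Errorf("corrupted on transfer: md5 differ %q vs %q", local, remoteHash)
        }
        return nil
    }
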
Nick Craig-Wood
e62d2fd309 oauthutil: Fix custom redirect URL message - fixes #2306 2018-05-13 17:28:09 +01:00
Nick Craig-Wood
e56be0dfd8 lsf: Add --csv flag for compliant CSV output 2018-05-13 12:18:21 +01:00
Nick Craig-Wood
2a32e2d838 operations: turn ListFormatted into a Format method on ListFormat 2018-05-13 12:17:55 +01:00
Nick Craig-Wood
db4c206e0e lsjson: add MimeType to the output 2018-05-13 12:17:55 +01:00
Nick Craig-Wood
f77efc7649 lsf: Add 'm' format specifier to show the MimeType 2018-05-13 12:17:55 +01:00
Nick Craig-Wood
aadbcce486 fs: Add MimeTypeDirEntry to return the MimeType of a DirEntry 2018-05-13 12:17:55 +01:00
Nick Craig-Wood
f162116132 lsjson: add ID field to output to show Object ID - fixes #1901 2018-05-13 12:17:55 +01:00
Nick Craig-Wood
909c3a92d6 lsf: implement 'i' format for showing object ID - fixes #1476 2018-05-13 12:17:55 +01:00
Nick Craig-Wood
826975c341 fs: add Optional ID() method to Object and implement it in backends
ID() shows the internal ID of the Object if available.
2018-05-13 12:17:55 +01:00
Fabian Möller
6791cf7d7f atexit: prevent Run from being called on nil signal 2018-05-12 18:59:25 +02:00
Fabian Möller
d022c81d99 mount: ensure atexit gets run on interrupt
When running `rclone mount`, there were 2 signal handlers for `os.Interrupt`.

Those handlers would run concurrently and in some cases cause either unmount or `atexit.Run()` to be skipped.

In addition `atexit.Run()` will get called in `resolveExitCode` to ensure cleanup on errors.
2018-05-12 10:40:44 +01:00
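
A minimal sketch of the single-handler approach, assuming a cleanup
callback that performs the unmount and atexit work (illustrative, not
the mount package's actual code):

    import (
        "os"
        "os/signal"
    )

    // registerCleanup installs one os.Interrupt handler so unmount and
    // atexit cleanup run exactly once instead of racing in two handlers.
    func registerCleanup(cleanup func()) {
        ch := make(chan os.Signal, 1)
        signal.Notify(ch, os.Interrupt)
        go func() {
            <-ch
            cleanup()
            os.Exit(1)
        }()
    }
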
Nick Craig-Wood
cdde8fa75a opendrive: finish off #1026
* Fix errcheck and golint warnings
  * Remove unused constants and fix comments
  * Parse error responses properly
  * Fix Open with RangeOption
  * Fix Move, Copy and DirMove
  * Implement DirCacheFlush
  * Check interfaces are correct
  * Remove debugs and update overview
  * Correct feature flags
  * Pare replacement characters down to the minimum set
  * Add to the integration tests
2018-05-12 10:10:46 +01:00
Nick Craig-Wood
5ede6f6d09 Add Jakub Karlicek to contributors 2018-05-12 10:10:45 +01:00
Jakub Karlicek
53292527bb opendrive: fill out the functionality #1026
* Add Mkdir, Rmdir, Purge, Delete, SetModTime, Copy, Move, DirMove
 * Update file size after upload
 * Add Open seek
 * Set private permission for new folder and uploaded file
 * Add docs
 * Update List function
 * Fix UserSessionInfo struct
 * Fix socket leaks
 * Don’t close resp.Body in Open method
 * Get hash when listing files
2018-05-12 10:07:25 +01:00
Oliver Heyme
ec9894da07 opendrive: initial parts with download and upload working #1026 2018-05-12 10:07:16 +01:00
Nick Craig-Wood
ad02d1be3f fstest: update comments on how to run individual tests 2018-05-11 14:04:36 +01:00
Nick Craig-Wood
63f413f477 webdav: show all available information when printing errors 2018-05-11 08:43:53 +01:00
Nick Craig-Wood
f1ffe8e309 fstests: fix test crash if NewFs fails 2018-05-11 08:43:53 +01:00
Nick Craig-Wood
d85b9bc9d6 webdav: workarounds for biz.mail.ru
* Add "Depth: 1" on read metadata PROPFIND call
  * Accept 406 to mean directory already exists
2018-05-11 08:43:53 +01:00
Nick Craig-Wood
b07e51cf73 webdav: read the body of messages into the error if XML parse fails 2018-05-11 08:43:53 +01:00
Nick Craig-Wood
f073db81b1 drive: add --drive-alternate-export to fix large doc export - fixes #2243
The official drive APIs seem to have trouble downloading large
documents sometimes.

This commit adds a --drive-alternate-export flag to use a different,
unofficial set of export URLs which seem to download large files OK.
2018-05-10 10:04:39 +01:00
Nick Craig-Wood
9698a2babb gcs: low level retry all operations if necessary
Google cloud storage doesn't normally need retries, however certain
things (eg bucket creation and removal) are rate limited and do
generate 429 errors.

Before this change the integration tests would regularly blow up with
errors from GCS rate limiting bucket creation and removal.

After this change we low level retry all operations using the same
exponential backoff strategy as used in the google drive backend.
2018-05-10 09:24:09 +01:00
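
A minimal sketch of low level retry with exponential backoff on 429s;
the retry count and initial sleep are illustrative, not the values the
backend uses:

    import (
        "net/http"
        "time"
    )

    // shouldRetry reports whether the call hit an error or rate limiting.
    func shouldRetry(resp *http.Response, err error) bool {
        if err != nil {
            return true
        }
        return resp != nil && resp.StatusCode == http.StatusTooManyRequests
    }

    // withRetries calls f with exponential backoff until it succeeds or
    // the tries are used up.
    func withRetries(f func() (*http.Response, error)) (resp *http.Response, err error) {
        sleep := 100 * time.Millisecond
        for try := 0; try < 10; try++ {
            resp, err = f()
            if !shouldRetry(resp, err) {
                break
            }
            time.Sleep(sleep)
            sleep *= 2 // exponential backoff
        }
        return resp, err
    }
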
Nick Craig-Wood
5eecbd83ee bin: make make_test_files.go work properly on Windows 2018-05-09 16:59:29 +01:00
Nick Craig-Wood
e42edc8e8c copy, move: Copy single files directly, don't use --files-from work-around
Before this change rclone would inefficiently and confusingly read all
the files in the source directory when copying or moving a single
file. This confused users, who saw log messages about files which
weren't part of the sync.

After the change the copy and move commands use the new infrastructure
made for the copyto and moveto commands for single file copy and move.
2018-05-07 20:39:52 +01:00
Nick Craig-Wood
291954baba cmd: make names of argument parsing functions more consistent 2018-05-07 20:39:52 +01:00
Nick Craig-Wood
9d8d7ae1f0 mount,cmount: make --noappledouble --noapplexattr and change defaults #2287
Before this change we would unconditionally set the OSXFUSE options
noappledouble and noapplexattr.

However the noapplexattr option caused problems with copies in the
Finder.

Now the default for noapplexattr is false so we don't add the option
by default and the user can override the defaults using the
--noappledouble and --noapplexattr flags.
2018-05-07 20:37:09 +01:00
Nick Craig-Wood
6ce32e4661 mount,cmount: Add --volname flag and remove special chars from it #2287
Before this change rclone would set the volume name from the
remote:path normally.  However this contains `:` and `/` characters
which make it difficult to use on macOS.

Now rclone will remove the special characters and replace them with
spaces.  It also allows the volume name to be set with the --volname
flag.
2018-05-07 20:37:09 +01:00
Nick Craig-Wood
1755ffd1f3 mount: make Get/List/Set/Remove xattr return ENOSYS #2287
By default bazil fuse will return ENOTSUPP for these.  However if we
return ENOSYS then OSXFUSE (at least) will never call them again,
saving round trips through fuse.
2018-05-07 20:37:09 +01:00
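
A minimal sketch, assuming bazil fuse's NodeGetxattrer signature and
its fuse.ENOSYS errno (Node is an illustrative stand-in for the
mount's node type):

    import (
        "context"

        "bazil.org/fuse"
    )

    type Node struct{}

    // Getxattr returns ENOSYS: OSXFUSE then stops issuing xattr calls
    // for the mount entirely, saving a round trip per lookup, whereas
    // ENOTSUPP would be retried on every access.
    func (n Node) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
        return fuse.ENOSYS
    }
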
Nick Craig-Wood
aa5c5ec5d3 build: mask linter errors we can't fix 2018-05-05 17:32:41 +01:00
Nick Craig-Wood
e80ae4e09c build: remove unused struct fields spotted by structcheck 2018-05-05 17:32:41 +01:00
Nick Craig-Wood
1320e84bc2 build: remove unused code spotted by the deadcode linter 2018-05-05 17:32:41 +01:00
Nick Craig-Wood
cb5bd47e61 build: fix errors spotted by ineffassign linter
These were mostly caused by shadowing err and a good fraction of them
will have caused errors not to be propagated properly.
2018-05-05 17:32:41 +01:00
Nick Craig-Wood
790a8a9aed build: add gometalinter and gometalinter_install Makefile targets 2018-05-05 17:32:41 +01:00
Nick Craig-Wood
f1a43eca4d mount: make --daemon work for macOS without CGO 2018-05-05 16:23:47 +01:00
Nick Craig-Wood
7ea68f1fc6 sftp: require go1.9+ after golang.org/x/crypto/ssh update 2018-05-05 16:23:47 +01:00
Nick Craig-Wood
6427029c4e vendor: update all dependencies
* Update all dependencies
  * Remove all `[[constraint]]` from Gopkg.toml
  * Add in the minimum number of `[[override]]` to build
  * Remove go get of github.com/inconshreveable/mousetrap as it is vendored
  * Update docs with new policy on constraints
2018-05-05 15:52:24 +01:00
Nick Craig-Wood
21383877df cmd: make exit code 8 for --max-transfer exceeded 2018-05-05 12:58:28 +01:00
Nick Craig-Wood
f95835d613 fserrors: Look deeper into errors for Fatal/Retry/NoRetry errors.
Before this change fatal errors which were wrapped in a system error (eg a
URLError) were not recognised as fatal errors.
2018-05-05 12:58:28 +01:00
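
A minimal sketch of looking deeper into wrapped errors, assuming a
Fataler interface and unwrapping *url.Error by hand as pre-errors.Unwrap
code had to (illustrative, not fserrors' actual implementation):

    import "net/url"

    // Fataler is implemented by errors which should abort the sync.
    type Fataler interface {
        Fatal() bool
    }

    // isFatal walks into wrapper errors such as *url.Error so a fatal
    // error wrapped by the HTTP layer is still recognised.
    func isFatal(err error) bool {
        for err != nil {
            if f, ok := err.(Fataler); ok {
                return f.Fatal()
            }
            if e, ok := err.(*url.Error); ok {
                err = e.Err
                continue
            }
            return false
        }
        return false
    }
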
Nick Craig-Wood
be79b47a7a sync: log when we abandon the sync due to a fatal error 2018-05-05 12:58:28 +01:00
Nick Craig-Wood
be22735609 fs/accounting: fix deadlock on GetBytes
A deadlock could occur since we have now put a mutex on GetBytes: one
lock chain runs StatsInfo.String (s.mu) - progress (acc.statmu), the
other read (acc.statmu) - GetBytes (s.mu).

Fix this by giving stringSet its own locking and excluding the call
which caused the deadlock from the mutex in StatsInfo.String.
2018-05-05 12:58:28 +01:00
Nick Craig-Wood
1b1b3c13cd sync: add a test for aborting on max upload 2018-05-05 12:58:28 +01:00
Nick Craig-Wood
5c128272fd Implement --max-transfer flag to quit transferring at a limit #1655 2018-05-05 12:58:28 +01:00
Nick Craig-Wood
d178233e74 sync,march: check the cancel context on every channel send and receive
This fixes a deadlock on sync when all the copying channels receive a
Fatal Error.
2018-05-05 12:58:28 +01:00
Fabian Möller
98bf65c43b vfs: fix ChangeNotify for new or changed folders
Fixes #2251
2018-05-05 12:54:03 +01:00
Fabian Möller
3b5e70c8c6 drive: fix ChangeNotify for folders 2018-05-05 12:54:03 +01:00
Fabian Möller
bd3ad1ac3e vfs: add option to read source files in chunks 2018-05-05 12:49:42 +01:00
Fabian Möller
9fdf273614 fs: improve ChunkedReader
- make Close permanent and return errors afterwards
- use RangeSeek from the wrapped reader if present
- add a limit to chunk growth
- correct RangeSeek interface behavior
- add tests
2018-05-05 12:49:42 +01:00
Nick Craig-Wood
fe25cb9c54 drive: fix about (and df on a mount) for team drives - fixes #2288
Before this fix team drives would return the drive quota, which is
incorrect and misleading.

Team drives don't appear to have an API for reading the bytes used or
the quota so we now return that the quota and usage are unknown.
2018-05-03 08:59:14 +01:00
Nick Craig-Wood
f2608e2a64 Add NoLooseEnds to contributors 2018-05-01 09:43:18 +01:00
NoLooseEnds
a5f1811892 cmd: Fixed a typo – minimum 2018-05-01 09:42:21 +01:00
Nick Craig-Wood
50dc5fe92e Add Rodrigo to contributors 2018-04-30 17:37:43 +01:00
Rodrigo
b7d2048032 WebDAV: Ignore Reason-Phrase in status line #2281 2018-04-30 17:36:38 +01:00
Nick Craig-Wood
3116249692 make sign_upload: only sign the v1.xx releases not the current ones 2018-04-30 17:29:50 +01:00
Nick Craig-Wood
d049e5c680 make build_dep: make sure we update the whole command for nfpm 2018-04-30 17:29:50 +01:00
Nick Craig-Wood
1c9572aba1 Add Piotr Oleszczyk to contributors 2018-04-30 17:29:50 +01:00
Piotr Oleszczyk
76f2cbeb94 sftp: Add --ssh-path-override flag #1474
The flag allows calculation of checksums on systems using
different paths for SSH and SFTP, like Synology NAS boxes.
2018-04-30 17:05:10 +01:00
Nick Craig-Wood
0479c7dcf5 add github-release to make release_dep 2018-04-28 12:38:30 +01:00
Nick Craig-Wood
55674c0bfc Start v1.41-DEV development 2018-04-28 12:37:55 +01:00
Nick Craig-Wood
e4c380b2a8 Version v1.41 2018-04-28 11:46:27 +01:00
Nick Craig-Wood
74cbdea0ef Revert "copy: create (pseudo copy) empty source directories to destination"
Unfortunately this commit attempts to create every directory rather
than just the empty ones, so will need re-working.

Removing this feature for the 1.41 release

This reverts commit 0daced29db.
2018-04-28 10:02:32 +01:00
Nick Craig-Wood
a3bf6b9c2c drive, gcs: fix service account authentication - fixes #2279
This fixes a problem introduced in b78af517de where it would
attempt to read a non-existent service account file.
2018-04-28 09:33:43 +01:00
ishuah
0daced29db copy: create (pseudo copy) empty source directories to destination - fixes #1837 2018-04-27 16:15:32 +01:00
Matt Holt
b78af517de Add service_account_credentials for Google Cloud and Drive 2018-04-27 16:07:37 +01:00
Nick Craig-Wood
d8e88f10cd rc: take note of the --rc-addr flag too as per the docs - fixes #2184 2018-04-26 17:00:44 +01:00
Nick Craig-Wood
849db6699d Add Richard Yang to contributors 2018-04-26 16:23:52 +01:00
Richard Yang
a81ec00a8c dedupe: Add dedupe largest functionality - fixes #2269 2018-04-26 16:21:07 +01:00
Nick Craig-Wood
da4a5e1fb3 docs: note that copytruncate is needed for --log-file with logrotate #2259 2018-04-26 15:30:46 +01:00
Nick Craig-Wood
ae562b5a4f ftp: more workarounds for FTP servers to fix mkParentDir - fixes #2181 2018-04-26 14:58:04 +01:00
Nick Craig-Wood
c01177bc28 ftp: work around strange response from box FTP server
The Box FTP server seems to send 450 instead of 550 - work around that.

See: https://forum.rclone.org/t/using-box-com-over-ftp-problems/5313
2018-04-26 14:58:04 +01:00
Nick Craig-Wood
9f04ce282e rc: fix setting bwlimit to unlimited 2018-04-26 12:21:29 +01:00
Nick Craig-Wood
764440068e filter: fix --min-age and --max-age together check
Somehow in the code reorganisation of
11da2a6c9b the check for --min-age and
--max-age got switched around.  This commit fixes that and means you
can use --min-age and --max-age together.
2018-04-26 09:17:22 +01:00
Nick Craig-Wood
a703216286 filter: take double negatives out of filter flag help 2018-04-26 09:17:13 +01:00
Nick Craig-Wood
96a62d55a2 lsd: Add -R flag and fix and update docs for all ls commands 2018-04-26 08:55:03 +01:00
Nick Craig-Wood
d0f32b62fd Revert "build: Temporary workaround for golint being missing."
This reverts commit be8bd89674.
2018-04-25 16:17:54 +01:00
Mateusz Pabian
7c5f87842c vfs: filter files . and .. from readDir output - fixes #2135 2018-04-25 16:09:07 +01:00
Nick Craig-Wood
cc8799e0d6 Add new email address for Oliver Heyme to contributors 2018-04-25 15:52:41 +01:00
Oliver Heyme
da214973a1 [install] Add arm64/aarch64 support 2018-04-25 15:51:38 +01:00
Nick Craig-Wood
be8bd89674 build: Temporary workaround for golint being missing.
See https://github.com/golang/lint/issues/397
2018-04-24 11:22:38 +01:00
Nick Craig-Wood
9ab2521ef2 rc: autogenerate and tidy the docs and commands
* Rename rc/pid -> core/pid
  * Sort the output of `rc list`
  * Make a script to autogenerate the docs
  * Tidy docs
2018-04-23 20:57:17 +01:00
Nick Craig-Wood
21a10e58c9 rc: implement core/memstats to print internal memory usage info 2018-04-23 20:49:36 +01:00
Nick Craig-Wood
d36b80f587 vendor: update bazil.org/fuse - corrects df -i - fixes #2089 2018-04-21 22:57:08 +01:00
Nick Craig-Wood
24980d7123 config: fix typo in error message #2268 2018-04-21 22:49:30 +01:00
Nick Craig-Wood
870c58f7f8 sftp: fail soft with a debug on hash failure #1474
If md5sum/sha1sum fails we log what it output on stderr at debug level
and return an empty hash indicating we didn't have a hash, rather than
hash.ErrUnsupported indicating that we don't support this hash type.

This fixes lots of ERROR messages for sftp on Synology NAS boxes
which, while they support md5sum, use different SFTP and SSH paths so
md5sum doesn't work.

We also stop disabling md5sum/sha1sum on errors since typically Hashes
is only checked at the start of a sync run and isn't expected to
change dynamically.
2018-04-21 09:02:53 +01:00
Nick Craig-Wood
b3c6f5f4b8 sftp: Update docs with Synology quirks 2018-04-21 09:02:53 +01:00
Nick Craig-Wood
311a962011 s3: Look in S3 named profile files for credentials - fixes #2243 2018-04-21 09:00:20 +01:00
Nick Craig-Wood
da7a77ef2e ftp: Fix no error on listing non-existent directory 2018-04-20 23:22:46 +01:00
Nick Craig-Wood
9fbc40c5b9 fstests: List missing dir must return ErrorDirNotFound for non bucket based remotes
List or ListR of a non-existent directory must return
ErrorDirNotFound for non bucket based remotes.  For bucket based
remotes it may return ErrorDirNotFound or it may return no error and
no entries.
2018-04-20 23:22:46 +01:00
Nick Craig-Wood
56ce784301 Add hensur to contributors 2018-04-20 21:44:12 +01:00
hensur
8fe3037301 webdav: support SharePoint cookie authentication
This enables the use of the SharePoint webdav endpoint provided by
OneDrive for Business or Office365 Education Accounts. It enables
unverified accounts to be accessed with rclone via webdav as it isn't
possible through the normal onedrive backend.

This integrates the https://github.com/hensur/onedrive-cookie-test
package to fetch the required cookies to authorize against the
SharePoint webdav endpoint.
2018-04-20 21:43:54 +01:00
hensur
ba7ae2ee8c rest: Add RemoveHeader and SetCookie method
These methods extend the rest package to support the cookie header and
header deletion.

The deletion is necessary to delete an existing authorization header if
cookie auth should be used.
2018-04-20 21:43:54 +01:00
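
A minimal sketch of why the deletion matters, using the standard
library equivalents of the new methods (prepareCookieAuth is an
illustrative helper):

    import "net/http"

    // prepareCookieAuth switches a request from header auth to cookie
    // auth: the old Authorization header must be removed or the server
    // will keep preferring it over the session cookie.
    func prepareCookieAuth(req *http.Request, session *http.Cookie) {
        req.Header.Del("Authorization") // what RemoveHeader does
        req.AddCookie(session)          // what SetCookie arranges
    }
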
Nick Craig-Wood
dc59836021 webdav: strip leading and trailing / off root - fixes #2257 2018-04-20 21:43:54 +01:00
Nick Craig-Wood
1a3fb21a77 onedrive: add QuickXorHash support for OneDrive for business - fixes #2262 2018-04-20 21:03:03 +01:00
Nick Craig-Wood
bcdb7719c6 fs/hash: install QuickXorHash as a supported rclone hash type #2262 2018-04-20 21:02:57 +01:00
Nick Craig-Wood
c51d97c752 hashsum: make generic tool for any hash to produce md5sum like output 2018-04-20 21:02:37 +01:00
Nick Craig-Wood
57a5b72d60 onedrive: implement quickXorHash algorithm #2262 2018-04-20 21:02:37 +01:00
Nick Craig-Wood
34ba17deec Add Chris Redekop second email to contributors 2018-04-20 20:53:15 +01:00
Nick Craig-Wood
e3a1bc9cd3 Add Michael G. Noll to contributors 2018-04-20 20:51:31 +01:00
Chris Redekop
a35e62e15c s3: Add an option to disable checksum uploading - fixes #2213 2018-04-20 20:51:12 +01:00
Michael G. Noll
d1ca8b8959 sftp: update docs to match code, fix typos and clarify disable_hashcheck prompt 2018-04-20 20:49:49 +01:00
Nick Craig-Wood
a0c65deca8 box: Parse file/directory size as a floating point number
Very large directories can have their sizes returned as floating point
numbers, eg `1.0034576985781e+14` from the box API.

Before this change this would fail to parse as an int64.

This change parses the size as a float64 instead which will be
perfectly accurate for sizes up to 2**53, which is about 9 PB.

It is unknown whether box themselves use a float64 as an intermediate
representation in the API or not - it seems likely.

Fixes #2261
2018-04-19 21:04:52 +01:00
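
A minimal sketch of the parse, decoding the size via float64 (the Item
struct is an illustrative reduction of the API response):

    import (
        "encoding/json"
        "strconv"
    )

    // Item is the relevant slice of a box API item. Size can arrive in
    // scientific notation, eg 1.0034576985781e+14, which int64 parsing
    // rejects.
    type Item struct {
        Size json.Number `json:"size"`
    }

    // SizeBytes parses the size as a float64 and truncates to int64,
    // exact for values up to 2**53 thanks to float64's 53 bit mantissa.
    func (i Item) SizeBytes() (int64, error) {
        f, err := strconv.ParseFloat(string(i.Size), 64)
        if err != nil {
            return 0, err
        }
        return int64(f), nil
    }
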
Nick Craig-Wood
1f255a8567 Add a mega.nz remote #163
Not supported yet:
  * Hash
  * ModTime
  * Server Side Copy

Otherwise fully functional and passing all the tests.
2018-04-18 21:09:54 +01:00
Nick Craig-Wood
f50b85278a vendor: github.com/t3rm1n4l for backend/mega 2018-04-18 21:09:54 +01:00
Nick Craig-Wood
9948b39dba about: don't attempt retries 2018-04-18 21:09:54 +01:00
Nick Craig-Wood
2b855751fc vfs,mount,cmount: use About to return the correct disk total/used/free
Disk total, used and free now show correctly for mount and cmount (eg
`df` on Unix or in the Windows explorer).
2018-04-18 18:27:34 +01:00
Nick Craig-Wood
ef3bcec76c fs: Extend SizeSuffix to include TB and PB for rclone about 2018-04-17 21:53:42 +01:00
Nick Craig-Wood
1ac6dacf0f about: complete other providers and re-work internals
* Implement about for:
    * local, crypt, cache, drive, swift, hubic, onedrive, pcloud, dropbox
  * Implement `--json` and `--full` flags for `rclone about`
  * change About interface to return a Usage structure
  * Remove operations.About as it is too thin an interface
  * Implement Integration test

Relates to #1138 and #1564
2018-04-17 21:53:27 +01:00
a-roussos
94e277d759 about: add new command 'about' to get quota info from a remote
Implemented for drive only.

Relates to #1138 and #1564.
2018-04-17 21:50:14 +01:00
Nick Craig-Wood
b83814082b backend/http: if HEAD didn't return Content-Length use -1 as size
This means that the files will be treated as an unknown length and
will download properly.

Fixes #2247
2018-04-16 19:40:02 +01:00
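
A minimal sketch with the standard library, which already reports a
missing Content-Length as -1 (remoteSize is an illustrative helper):

    import "net/http"

    // remoteSize returns the object size from a HEAD request, or -1
    // when the server sends no Content-Length, which callers treat as
    // "unknown length, stream until EOF".
    func remoteSize(url string) (int64, error) {
        resp, err := http.Head(url)
        if err != nil {
            return -1, err
        }
        defer resp.Body.Close()
        return resp.ContentLength, nil
    }
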
Nick Craig-Wood
2b7957cc74 vfs: Only make the VFS cache if --vfs-cache-mode > Off
This stops the cache cleaner running unnecessarily and saves
resources.

This also helps with issue #2227 which was caused by a second mount
deleting objects in the first mounts cache.
2018-04-16 17:06:41 +01:00
Nick Craig-Wood
3d5106e52b drive: fix DirMove leaving a hardlinked directory behind #2245
This bug was introduced by the v3 API conversion in 07f20dd1fd.

The problem was that dircache.FindPath doesn't work for the root directory.

This adds an internal error for dircache.FindPath being called with
the root directory.  This produces a failing test, which the fix to
the drive backend resolves.

This also improves the DirCache integration test.
2018-04-15 10:12:21 +01:00
Nick Craig-Wood
29ce1c2747 fstest: fix CheckListingWithPrecision with non Windows safe chars
* Factor WinPath from fstest to fstests
  * Use it to normalize the directory names while checking them
2018-04-15 10:12:20 +01:00
Nick Craig-Wood
dc247d21ff s3: add in config for all the supported S3 providers #2140
These are AWS, Ceph, Dreamhost, IBM COS S3, Minio, Wasabi and Other.

This configures endpoints where known and, where possible, makes sure
config options don't appear where they aren't valid.
2018-04-13 16:33:26 +01:00
Nick Craig-Wood
8c3740c2c5 config: Improve the Provider matching to have a negated match #2140
This makes it easier to make classes of provider in the config.
2018-04-13 16:06:37 +01:00
Giri Badanahatti
acd5d4377e config,s3: hierarchical configuration support #2140
This introduces a method of making provider specific configuration
within a remote.  This is particularly useful in s3.

This commit does the basic configuration in S3 for IBM COS.
2018-04-13 16:05:35 +01:00
Matthew Holt
9e4cd55477 size: Add --json flag 2018-04-13 13:38:06 +01:00
Nick Craig-Wood
2015f98f0c Add Craig Rachel to contributors 2018-04-13 13:36:46 +01:00
Craig Rachel
0e6faa2313 s3: add One Zone Infrequent Access storage class - fixes #2240 2018-04-13 13:36:25 +01:00
Nick Craig-Wood
905e40b3e6 Add Peter Baumgartner to contributors 2018-04-13 13:33:22 +01:00
Peter Baumgartner
1db68571fd s3,swift: Add --use-server-modtime
`--use-server-modtime` stops s3 and swift retrieving the modtime from metadata which enables a fast sync mode with the `--update` flag.
2018-04-13 13:32:17 +01:00
Nick Craig-Wood
6b67489133 Add Animosity022 to contributors 2018-04-13 13:26:41 +01:00
Animosity022
27dfcf303c cache: improve docs
This adds a note that the cache-chunk-path needs to be cleared manually if chunk-size is changed.
2018-04-13 13:26:26 +01:00
Nick Craig-Wood
e6d9720d7b Add Mateusz Piotrowski to contributors 2018-04-13 13:25:16 +01:00
Mateusz Piotrowski
196da4d903 dropbox: fix a typo in the docs 2018-04-13 13:24:58 +01:00
Nick Craig-Wood
18317a2747 vendor: update github.com/pkg/sftp because dep insisted 2018-04-13 13:23:55 +01:00
Nick Craig-Wood
ef412c1985 drive: fix misplaced log in dedupe MergeDirs 2018-04-13 13:23:55 +01:00
Nick Craig-Wood
d97fe3b824 fs/operations: make dedupe work with mega
* factor into its own files
  * remove assumptions about having a given hash type
  * make tests work if the remote has no hash
2018-04-13 13:23:55 +01:00
Nick Craig-Wood
792c9e185e Add Antoine GIRARD to contributors 2018-04-13 13:23:55 +01:00
Antoine GIRARD
1f681e585b fstests: fix typo 2018-04-13 13:23:08 +01:00
Nick Craig-Wood
e82452ce9a drive: check Open calls for google error messages
This should also enable Open calls to retry properly
2018-04-11 20:55:58 +01:00
Nick Craig-Wood
dcf8334673 fs: add --dump goroutines and --dump openfiles
These are developer flags useful for tracking down resource leaks.
2018-04-11 20:55:58 +01:00
Nick Craig-Wood
37be78705d fs/fshttp: limit MaxIdleConns and MaxIdleConnsPerHost
Before this change mega (which uses a different host per download)
would open too many sockets.
2018-04-11 20:51:28 +01:00
Nick Craig-Wood
4b5ff33125 fstest: retry cleaning the integration test directory if necessary 2018-04-11 20:51:13 +01:00
Nick Craig-Wood
d5b2ec32f1 local: add --local-no-check-updated to disable update checks #2206
This disables the `can't copy - source file is being updated` checks.
2018-04-09 15:27:58 +01:00
Nick Craig-Wood
aeedacfb50 Add Michael P. Dubner to contributors 2018-04-09 13:33:27 +01:00
Michael P. Dubner
92b266d361 rc: new call rc/pid - closes #2211 2018-04-09 13:33:04 +01:00
Nick Craig-Wood
05e32cfcf9 dropbox: Fix crypt+obfuscate on dropbox - fixes #2191
Before this change we lowercased the dropbox root directory.  This was
likely a leftover from when we used to build a dictionary to translate
the cases of dropbox files.  Now with the v2 API we can rely on
dropbox to do that for us, so we no longer need to lowercase the root.

This fixes issues using crypt with name obfuscation on dropbox.
2018-04-09 11:53:41 +01:00
Nick Craig-Wood
cbec59146a lsf: make sure we use localtime in tests - fixes Box integration tests
This problem was introduced with eca99b33c0.  It seems Box is the only
remote which converts time zones, so if you give it a GMT time zone,
it returns a PST time zone which represents the same instant.
2018-04-09 11:46:49 +01:00
Nick Craig-Wood
06e3fa3aba mounttest: reduce duplicated code and improve test output #2154
The written out list of tests was replaced with a nested test for
mount and cmount. The tests for each VFS cache mode were also replaced
with nested tests which makes the output and the code much cleaner.
2018-04-08 15:04:14 +01:00
Nick Craig-Wood
0fa700b3cf Make integration tests use go1.7+ nested tests #2154
* Removed generated code and code generator
  * Updated docs on how to write integration tests
  * Tidied up the actual integration tests
2018-04-08 15:04:14 +01:00
Nick Craig-Wood
42f0963bf9 local: retry remove on Windows sharing violation error #2202
Before this change asynchronous closes in cmount could cause sharing
violations under Windows on Remove which manifest themselves
frequently as test failures.

This change lets the Remove be retried on a sharing violation under
Windows.
2018-04-07 17:36:26 +01:00
Nick Craig-Wood
be54fd8f70 Remove builds conditional on go1.7 since that is now guaranteed #2154
Old fallback code was deleted and the go1.7 style code inlined where
appropriate.
2018-04-07 11:42:55 +01:00
Nick Craig-Wood
e5be471ce0 Use io.SeekStart/End/Current constants now for go1.7+ #2154 2018-04-07 11:42:36 +01:00
Nick Craig-Wood
80588a5a6b Replace "golang.org/x/net/context" with "context" for go1.7+ #2154 2018-04-07 11:42:08 +01:00
Nick Craig-Wood
67023f0040 Require go1.7 for compilation #2154
* Update the travis tests to exclude go1.6
  * Update the compile check to require go1.7+
  * Update misc go1.6 workarounds marked in the source
2018-04-06 20:18:14 +01:00
Nick Craig-Wood
32e02bd367 fstests: Fix TestObjectRemove failures
This was failing because TestPublicLink was causing the file to be
modified with Google drive.
2018-04-06 16:27:19 +01:00
Nick Craig-Wood
c749cf8d99 dropbox: fix repeatedly uploading the same files - fixes #2218
In #2134 and dfd0f4c5a4 some testing
changes got committed by accident which caused this regression.

This patch reverts it to how it was before.
2018-04-06 15:34:56 +01:00
Nick Craig-Wood
92cfb57fbd fstest/test_all: make -clean work better with google cloud storage 2018-04-06 14:54:33 +01:00
Nick Craig-Wood
0cb5c4aa73 gcs: detect bucket presence by listing it - fixes #2193
Doing it like this enables the use of a service account that only has
the "Storage Object Admin" role.
2018-04-06 12:45:15 +01:00
Nick Craig-Wood
0358e9e724 Add Eri Bastos to contributors 2018-04-05 20:20:53 +01:00
Eri Bastos
a69d8ec93b Fixed typo on ownCloud description 2018-04-05 20:20:31 +01:00
Nick Craig-Wood
92c5aa3786 s3: add --s3-chunk-size option - fixes #2203 2018-04-05 15:40:08 +01:00
Nick Craig-Wood
fbe1c7f1ea dropbox: remove unused code 2018-04-05 15:23:23 +01:00
Nick Craig-Wood
c4531daa43 local: work on spurious "can't copy - source file is being updated" errors #2206
Update all the time comparisons to use time.Time.Equal instead of ==

Improve the logging for that error so we can see exactly what has changed
2018-04-05 14:57:30 +01:00
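
A small example of why Equal rather than == is needed (illustrative):

    import (
        "fmt"
        "time"
    )

    // demoEqual shows the difference between == and time.Time.Equal.
    func demoEqual() {
        utc := time.Date(2018, 4, 5, 12, 0, 0, 0, time.UTC)
        local := utc.In(time.Local)
        // == also compares the Location (and any monotonic reading),
        // so the same instant in two zones compares unequal.
        fmt.Println(utc == local) // false
        // Equal compares the instant only.
        fmt.Println(utc.Equal(local)) // true
    }
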
remusb
6e11a25df5 cache: flush the memory cache after close 2018-04-04 23:25:53 +03:00
Nick Craig-Wood
0865e38917 Add Matt Holt to contributors 2018-04-04 14:56:50 +01:00
Nick Craig-Wood
ab2fa59fc4 Add Alexander Neumann to contributors 2018-04-04 14:56:50 +01:00
Matt Holt
e13f65b953 serve restic: Print actual listener address 2018-04-04 14:56:26 +01:00
Alexander Neumann
5b8977a053 serve restic: Disallow overwriting files in append-only mode - Fixes #2195
* Disallow overwriting files in append-only mode
* Add tests for append-only mode
2018-04-04 14:49:13 +01:00
remusb
1dea99ab20 cache: purge file data on notification 2018-04-03 23:24:45 +03:00
Nick Craig-Wood
06a8d3011d Add Chih-Hsuan Yen to contributors 2018-04-02 11:43:22 +01:00
Chih-Hsuan Yen
e7fd607078 Fix make tarball 2018-04-02 11:42:53 +01:00
Nick Craig-Wood
eca99b33c0 lsd,lsf: make sure all times we output are in local time - fixes #2183
Previous to this change times from lsd/lsf were output in whatever
timezone they were in whereas times from lsl were converted to
localtime.
2018-04-01 15:40:04 +01:00
remusb
e42cee5e02 cache: always forget parent dir for notifications - for #2117 2018-03-31 12:44:09 +03:00
Nick Craig-Wood
d45c750f76 Add Steve Kriss to contributors 2018-03-30 19:55:49 +01:00
Steve Kriss
2c2bb0f750 cmd/serve/restic: add append-only mode 2018-03-30 19:54:52 +01:00
Stefan
a8267d1628 link: allow creating public link to files and folders - closes #1562 2018-03-29 09:10:19 +02:00
Nick Craig-Wood
9df266a6b4 onedrive: Fix socket leak in multipart session upload
This had gone unnoticed until recently when we changed to uploading
all files with a multipart session.
2018-03-28 21:03:19 +01:00
Stefan Breunig
4d553ef701 drive: when initialized with a filepath, optional features used incorrect root path – see #2182 2018-03-28 20:33:39 +02:00
Nick Craig-Wood
1ba3ffdc59 Add Keith Goldfarb to contributors 2018-03-26 21:03:18 +01:00
Nick Craig-Wood
72f1b097a7 Add gbadanahatti to contributors 2018-03-26 21:03:18 +01:00
Nick Craig-Wood
885044d0a5 Add seuffert to contributors 2018-03-26 21:03:18 +01:00
Keith Goldfarb
6c10312c75 ncdu: added a "refresh" key - for #2174
Added Control+L key to refresh screen. Not sure if this is the
best choice, but it appears to be somewhat common.
2018-03-26 21:02:39 +01:00
gbadanahatti
e5aa5fe7d8 s3: docs: Minor format and URL changes to IBM COS Documentation content 2018-03-26 20:49:53 +01:00
Nick Craig-Wood
9b140b42c9 docs: fix current download link 2018-03-26 17:45:45 +01:00
Nick Craig-Wood
0bfbde8856 fstest: make ChangeNotify test clean up after itself and be more reliable
Previous to this fix old notifications could creep in and cause the
test to fail.  It also left files around which upset the TestObjectRemove test.

Fixes #2177
2018-03-24 19:57:44 +00:00
Nick Craig-Wood
98a924602f mount, cmount: set --attr-timeout default to 1s - fixes #2157
This works around these 3 problems:

  * rclone using too much memory #2157
  * rclone not serving files to samba
    * https://forum.rclone.org/t/rclone-1-39-vs-1-40-mount-issue/5112
  * excessive time listing directories #2095
2018-03-23 22:42:51 +00:00
Nick Craig-Wood
7e80e609e8 docs: install.sh add macOS fallback for mktemp - fixes #2173 2018-03-23 22:24:28 +00:00
Mateusz Pabian
91b068ad3a sync: implement --ignore-errors - fixes #642 2018-03-23 22:01:10 +00:00
remusb
b52e34ef5e cache: add info log on notification - for #2150 2018-03-23 22:41:01 +02:00
Nick Craig-Wood
32e6eee341 release: add another step to update the release dependencies #2172 2018-03-23 12:43:18 +00:00
Nick Craig-Wood
c5f1d501ed docs: fix download links for .deb and .rpm 2018-03-23 12:43:18 +00:00
remusb
0ed0d9a7bc cache: integrate with Plex websocket 2018-03-22 21:21:03 +02:00
seuffert
d9c13bff83 add rc cache/stats 2018-03-22 21:16:16 +02:00
Daniel Loader
ce91289b09 docs: tweak rc cache documentation to give an example 2018-03-22 15:10:34 +00:00
Nick Craig-Wood
5ba5be9b37 gcs: ignore zero length directory markers at the root too 2018-03-21 20:10:00 +00:00
Nick Craig-Wood
e9a2cbec37 s3: ignore zero length directory markers at the root too 2018-03-21 20:09:37 +00:00
Nick Craig-Wood
4f6f07c074 cmount: fix error handling for Open/OpenDir 2018-03-21 19:44:30 +00:00
Nick Craig-Wood
f6020f1308 gcs: ignore zero length directory markers 2018-03-19 17:42:27 +00:00
Nick Craig-Wood
a46f2a9eb7 s3: ignore zero length directory markers - fixes #1621 2018-03-19 17:41:46 +00:00
Nick Craig-Wood
911a78ce6d sftp: require go1.8+ after github.com/pkg/sftp update 2018-03-19 16:37:40 +00:00
Nick Craig-Wood
d64789528d vendor: update all dependencies 2018-03-19 15:51:38 +00:00
Nick Craig-Wood
940df88eb2 Start v1.40-DEV development 2018-03-19 14:20:48 +00:00
Nick Craig-Wood
19ca9fb939 release: Put the releases into a v1.XX subdirectory 2018-03-19 14:20:09 +00:00
Nick Craig-Wood
26f1c55987 Version v1.40 2018-03-19 10:06:13 +00:00
Nick Craig-Wood
1afac32d80 serve restic: script for running integration test against all remotes 2018-03-18 19:15:39 +00:00
Nick Craig-Wood
26fbd00b4f serve restic: don't buffer the JSON output in memory for the list command 2018-03-18 16:26:58 +00:00
Nick Craig-Wood
1313b529ff serve restic: use ListR (--fast-list) if available
For Restic's use case, --fast-list will use fewer transactions and
calling ListR directly means we can avoid the usual memory overhead.
2018-03-18 16:22:05 +00:00
Nick Craig-Wood
82e835d6fc serve restic: make it easy to run integration tests against any remote
Just `cd cmd/serve/restic` then `go test -v -remote TestRemote:`
2018-03-18 14:23:56 +00:00
Nick Craig-Wood
fa867a9a4c serve restic: implement accounting for uploads and downloads
This means the bandwidth stats will be correct and the bandwidth
throttling will work correctly.  This was forgotten as a previous
iteration of the code was using the higher level operations.Rcat which
took care of this.
2018-03-18 14:19:43 +00:00
Nick Craig-Wood
38d9475a34 release: include a source tarball and sign everything #1449 2018-03-17 15:06:04 +00:00
Nick Craig-Wood
c21c7e75b0 Add Stefan Lindblom to contributors 2018-03-17 12:12:23 +00:00
Stefan Lindblom
c8d095612a drive: Document process for service account and impersonation 2018-03-17 12:11:48 +00:00
Nick Craig-Wood
012d4a1235 docs: fix download icon 2018-03-17 12:00:14 +00:00
Nick Craig-Wood
854d3c3025 Add Dave Pedu to contributors 2018-03-17 12:00:14 +00:00
Dave Pedu
5bedc4c668 crypt: fix path in docs 2018-03-17 11:59:25 +00:00
Stefan
86892467d9 config: load config file only on first access (closes #1659, closes #2096) (#2147) 2018-03-17 12:36:30 +01:00
Nick Craig-Wood
e62fe06763 s3: document --ignore-checksum workaround for KMS #1824 2018-03-17 10:51:45 +00:00
Nick Craig-Wood
4295428a0f fs/accounting: add remote control of bwlimit 2018-03-17 10:34:02 +00:00
Nick Craig-Wood
2db0c4dd95 vfs: add remote control for directory cache flushing 2018-03-17 10:34:02 +00:00
Nick Craig-Wood
5bf639048f sync: log an error that --track-renames doesn't work with sync or move
Fixes #2008
2018-03-17 10:34:02 +00:00
remusb
4924ac2f17 cache: reduce log level for plex api - for #2102 2018-03-17 11:57:36 +02:00
Nick Craig-Wood
d4cca8d9f9 onedrive: fix upload of zero length files #1716
Unfortunately multi part upload can't upload zero length files so
bring back the single part upload for zero length files only.

This was broken when we made all uploads multipart uploads.
2018-03-17 09:48:28 +00:00
Nick Craig-Wood
a9e386b153 Add wolfv to contributors 2018-03-17 09:06:51 +00:00
wolfv
117238211b docs: Change log levels to all caps - fixes #2101 2018-03-17 09:06:51 +00:00
Oliver Heyme
645cf5ec0f onedrive: fix wrong upload endpoint and createDate #1716
This fixes the problem introduced by 7f744033d8
2018-03-16 19:18:51 +00:00
Nick Craig-Wood
d1bb8efb88 sftp: follow symlinks correctly - fixes #2145
In directory listings the sftp library delivers the attributes of the
symlink rather than of the object pointed to; however when we use Stat
from the library we get the attributes of the target object.

Previous to this fix this caused items pointed to by symlinks to be
unusable.

After the fix both symlinked files and directories work as expected.
2018-03-16 15:36:47 +00:00
Nick Craig-Wood
c19e675ca6 vfs: unify locking for RWFileHandle.openPending,.close and File.Delete #2141
Without this fix the cached file can be removed as the file is being
uploaded or downloaded.  This can cause the directory listings to
become inconsistent (this issue) or data loss (if a retry was needed
in the Copy).

Removing the file needs to be excluded from running at the same time
as both openPending and close, so it makes sense to unify the locking
between all 3.
2018-03-15 20:49:07 +00:00
Nick Craig-Wood
34c45a7c04 mount, cmount: remove addition of O_CREATE to flags on file open #2141
Previously this was adding it in to all file opens which was causing
inefficiencies under Windows where it stats the file using
open/fstat/close.

This change will make stat operations run much quicker under Windows
as they won't have to open the underlying file.

This problem was introduced in 61b6159a05336bd7ba105766de2d2ff171f7fb81
where we added O_CREATE to all file opens and creates.
2018-03-15 20:48:56 +00:00
Nick Craig-Wood
0a0318df20 Add Leo R. Lundgren to contributors 2018-03-15 20:24:42 +00:00
Leo R. Lundgren
04e055fc06 sftp: Add --sftp-ask-password flag to prompt for password when needed - #2137 2018-03-15 20:24:30 +00:00
Nick Craig-Wood
d551137635 Add Giri Badanahatti to contributors 2018-03-15 20:21:12 +00:00
Giri Badanahatti
aba43cd3a4 Documentation for IBM COS (S3) configuration. 2018-03-15 20:20:43 +00:00
Oliver Heyme
7f744033d8 onedrive: Removed upload cutoff and always do session uploads
Set modtime on copy

Added versioning issue to OneDrive documentation
2018-03-15 20:18:11 +00:00
remusb
078d705dbe cache: notify vfs and support crypt in rpc - #2111 2018-03-15 11:39:16 +02:00
Nick Craig-Wood
5981f9fab5 acd: disable integration tests
We no longer have any working keys for Amazon Cloud Drive so disable
the integration tests.
2018-03-14 22:44:46 +00:00
Alexander Neumann
84776c4e43 serve/restic: Remove log message on Close 2018-03-14 21:50:33 +00:00
Nick Craig-Wood
c1a3e363a6 mount: return ENOSYS rather than EIO on attempted link
This fixes FileZilla accessing an rclone mount served over sftp.

See: https://forum.rclone.org/t/moving-files-on-rclone-mount-with-filezilla/5029
2018-03-14 21:10:20 +00:00
Nick Craig-Wood
7ccc6080b0 serve restic: add more info to GET request error 2018-03-14 21:09:47 +00:00
remusb
677971643c cache: add support for rc 2018-03-14 22:58:20 +02:00
remusb
f4a1c1163c rc: update doc with supported params 2018-03-14 22:58:20 +02:00
remusb
97b48cf988 rc: add support for Go 1.6 2018-03-14 22:58:20 +02:00
Nick Craig-Wood
86e5a35491 Implement Remote Control for rclone #2111
This implements a remote control protocol activated with the --rc flag
and a new command `rclone rc` to use that interface.

Still to do
  * docs - need finishing
  * tests
2018-03-14 22:58:20 +02:00
Nick Craig-Wood
8bb2854fe4 httplib: allow the flags to be prefixed when instantiating a server 2018-03-14 22:58:20 +02:00
Remus Bunduc
d76da1f5fd cache: fix dir cache issue - #2117 2018-03-14 11:08:30 +02:00
Nick Craig-Wood
89748feaa5 s3: update docs to discourage use of v2 auth - fixes #2120
From testing it appears that CEPH no longer works properly with v2
auth and neither does Dreamhost, so update the docs and configuration
to recommend v4 auth.
2018-03-13 20:47:29 +00:00
Nick Craig-Wood
dfd0f4c5a4 sync: when using --backup-dir don't delete files if we can't set their modtime
This is a problem when syncing a file which just needs its modtime
set with dropbox, which can't set the modtime of a file without
re-uploading it.

Before this change we would delete the file, then the server side move
would fail moving the file to the backup-dir because it no longer
existed.

After this change the destination file is moved to the backup-dir
instead of being deleted and the new file is uploaded.

Fixes #2134
2018-03-13 16:05:06 +00:00
Nick Craig-Wood
0c9dc006c5 fs: make display of default values of --min-age/--max-age be off - Fixes #2121 2018-03-13 09:06:07 +00:00
Nick Craig-Wood
4e90ad04d5 serve restic: only accept v2 API requests for list 2018-03-11 17:35:01 +00:00
Nick Craig-Wood
43c7ea81df Add Alexander Neumann to contributors 2018-03-11 17:35:01 +00:00
Alexander Neumann
fa003e89b6 serve restic: When listing return empty list instead of 'null' 2018-03-11 14:48:46 +00:00
Alexander Neumann
5114b11d6f serve restic: add http2 server on stdin/stdout 2018-03-11 14:48:46 +00:00
Alexander Neumann
f832433fa5 serve restic: Return empty list for non-existing dirs 2018-03-11 14:48:43 +00:00
Nick Craig-Wood
d073efdc6c serve restic: serves a remote in restic REST API format 2018-03-11 14:43:03 +00:00
Nick Craig-Wood
9e48748182 httplib: Note that authentication is a good idea for non localhost 2018-03-11 14:38:54 +00:00
Nick Craig-Wood
b6058e0106 docs/install.sh: don't create root owned .config/rclone directory #2127 2018-03-10 11:09:13 +00:00
Nick Craig-Wood
66c69fe620 mount: wait longer for consistency after rm in tests 2018-03-09 23:15:38 +00:00
Nick Craig-Wood
a2336ad774 vfs: fix deadlock in mount tests
This was caused by this sequence of calls

1> file.Release
1> file.close  -> takes the file lock
2> vfs.waitforWriters
2> dir.walk -> takes the dir lock
1> file.setObject
1> dir.addObject -> attempts to take the dir lock - BLOCKS
2> file.activeWriters -> tries to take file lock - BLOCKS - DEADLOCK

The fix is to make activeWriters not take the file lock and use atomic
operations to read the number of writers instead.
2018-03-09 23:15:38 +00:00
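
A minimal sketch of the atomic counter, with File reduced to the one
field that matters here (illustrative, not the vfs code):

    import "sync/atomic"

    type File struct {
        writers int32 // count of open writers, updated atomically
    }

    // addWriters adjusts the writer count without taking the file lock.
    func (f *File) addWriters(n int32) { atomic.AddInt32(&f.writers, n) }

    // activeWriters reads the count lock-free, breaking the lock cycle
    // shown in the trace above.
    func (f *File) activeWriters() int32 {
        return atomic.LoadInt32(&f.writers)
    }
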
Nick Craig-Wood
7713acf23d mount: skip failing test TestFileModTimeWithOpenWriters on Windows 2018-03-09 23:15:38 +00:00
Nick Craig-Wood
473a388f6d mount: disable failing test TestWriteFileDoubleClose on OSX 2018-03-09 23:15:37 +00:00
Nick Craig-Wood
c8a4d437a0 Make travis test mount and cmount - fixes #2100
Previously FUSE wasn't found in the container so these tests weren't
run.  Move to VM based testing and install FUSE dependencies.
2018-03-09 23:15:37 +00:00
Nick Craig-Wood
09c14af6d1 cmd: Fix go routines at exit message to make it less confusing 2018-03-09 17:15:48 +00:00
Jakub Tasiemski
acae10cd6f lsjson: add --encrypted to show encrypted name #1765 2018-03-09 08:44:02 +00:00
Nick Craig-Wood
0861207ace fstest/test_all: set cache backend wait time to 30m to fix integration tests 2018-03-08 21:14:09 +00:00
Nick Craig-Wood
a7dbf32c53 cache: Implement --cache-db-wait-time flag
This can be used to make the cache wait for other running cache
backends to finish rather than erroring after 1 second.
2018-03-08 21:14:09 +00:00
Nick Craig-Wood
6025bb6ad1 local: fix race conditions updating the hashes
This was causing occasional test failures for the -race test of mount
and cmount.
2018-03-08 21:08:41 +00:00
Remus Bunduc
70f07fd3ac fs: add ChangeNotify and backend support for it (#2094)
* fs: rename DirChangeNotify to ChangeNotify

* cache: switch to ChangeNotify

* ChangeNotify: keep order of notifications
2018-03-08 22:03:34 +02:00
Nick Craig-Wood
b3f55d6bda vendor: Update github.com/Unknwon/goconfig to fix section listing
This fixes listing sections just after creation, which means the
rclone config list will now have all the keys in it.
2018-03-08 13:18:27 +00:00
Nick Craig-Wood
d9094f1a45 vendor: Gopkg.lock file format changes only after go dep update 2018-03-08 13:16:59 +00:00
Nick Craig-Wood
572ee5ec96 Sign the tags as part of the release process #1449 2018-03-07 15:18:13 +00:00
Nick Craig-Wood
316dac25c2 travis: add encrypted GITHUB_USER and GITHUB_TOKEN for using the API 2018-03-07 10:18:10 +00:00
Nick Craig-Wood
ee3c45676f bin/get-github-release.go: use GITHUB_USER/GITHUB_TOKEN when available
This should help with rate limiting problems when running under
travis.
2018-03-07 10:18:09 +00:00
Nick Craig-Wood
2e7e15461b bin/get-github-release.go: report body of HTTP responses with errors 2018-03-07 10:18:06 +00:00
Nick Craig-Wood
0175332987 vfs: fix applying modtime for an open Write Handle
The symptom of this was that the time set when the file was open was
lost.  This was causing one of the mount tests to fail too.
2018-03-06 21:58:11 +00:00
Nick Craig-Wood
85e0b87c99 build: add .deb and .rpm output for the build
This uses https://github.com/goreleaser/nfpm to create the .deb and
.rpm packages from the standard build output.
2018-03-06 12:37:44 +00:00
Nick Craig-Wood
d41017a277 A script to download and install the latest release of a github package 2018-03-06 12:37:44 +00:00
Nick Craig-Wood
fc32fee4ad mount, cmount: add --attr-timeout to control attribute caching in kernel
This flag allows the attribute caching in the kernel to be controlled.
The default is 0s - no caching - which is recommended for filesystems
which can change outside the control of the kernel.

Previously this was at the default meaning it was 60s for mount and 1s
for cmount.  This showed strange effects when files changed on the
remote not via the kernel.  For instance Caddy would serve corrupted
files for a while when serving from an rclone mount when a file
changed on the remote.
2018-03-04 11:20:22 +00:00
Nick Craig-Wood
5795bd7db6 vfs: update cached copy if we know it has changed even if pending opens
This fixes a problem with Caddy serving corrupted files out of the VFS
cache when the file on the remote changed.
2018-03-04 11:20:22 +00:00
Nick Craig-Wood
9b011ce7e4 vfs: keep track of number of open RWHandles 2018-03-04 11:20:22 +00:00
Nick Craig-Wood
5e334eedd2 vfs: re-use the File objects when re-reading the directory
Make it so that d.items is never nil to simplify the code

This should help with inconsistent reads when the source object changes.
2018-03-04 11:20:22 +00:00
Nick Craig-Wood
7fb53a031c vfs: don't cache the object in read and read/write handles
This should help with inconsistent reads when the source object changes.
2018-03-04 11:20:22 +00:00
ishuah
ebfeec9fb4 mount: run rclone mount in the background - fixes #723 2018-03-04 14:06:07 +03:00
ishuah
90af7af9a3 added dependency github.com/sevlyar/go-daemon 2018-03-04 14:06:07 +03:00
Nick Craig-Wood
fe8eeec5b5 cache: improve efficiency with RangeOption and RangeSeek #1825
* All remotes now support RangeOption so remove SeekOption
  * Correct off by one error as RangeOption arguments are inclusive.
  * Use RangeSeek in preference to Seek if available
2018-03-02 17:10:56 +00:00
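A self-contained sketch of the RangeSeek idea (the interface and helper here are illustrative, not rclone's exact fs API): a seek that carries a length hint lets a backend fetch exactly the bytes needed in one ranged request, with a plain Seek as the fallback.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// RangeSeeker is like io.Seeker but also hints how many bytes will be
// read after the seek, so a backend can issue one request covering
// exactly [offset, offset+length).
type RangeSeeker interface {
	io.Reader
	RangeSeek(offset int64, whence int, length int64) (int64, error)
}

// readAt reads length bytes at offset, preferring RangeSeek when the
// reader supports it and falling back to a plain Seek otherwise.
func readAt(r io.ReadSeeker, offset, length int64) ([]byte, error) {
	buf := make([]byte, length)
	if rs, ok := r.(RangeSeeker); ok {
		if _, err := rs.RangeSeek(offset, io.SeekStart, length); err != nil {
			return nil, err
		}
		_, err := io.ReadFull(rs, buf)
		return buf, err
	}
	if _, err := r.Seek(offset, io.SeekStart); err != nil {
		return nil, err
	}
	_, err := io.ReadFull(r, buf)
	return buf, err
}

func main() {
	r := bytes.NewReader([]byte("hello, rclone"))
	b, _ := readAt(r, 7, 6)
	fmt.Printf("%s\n", b) // rclone
}
```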
Nick Craig-Wood
e0eb666dbf fs/walk: fix new golint warning about unused variables in range 2018-03-02 17:01:58 +00:00
Nick Craig-Wood
7d4da1c66a local: fix crash on Stat error while reading a file 2018-03-01 13:17:40 +00:00
Nick Craig-Wood
f3e982d3bf azureblob,b2,gcs,qingstor,s3,swift: Don't check for bucket/container presence if listing was OK
In a typical rclone copy to a bucket/container based remote, before
this change we were doing a list, followed by a HEAD of the bucket to
check it existed before doing the copy.  The fact the list succeeded
means the bucket exists so mark it OK at that point.

Issue #1421
2018-03-01 12:11:34 +00:00
Nick Craig-Wood
3f9d0d3baf docs: improve --files-from documentation 2018-03-01 09:59:50 +00:00
Nick Craig-Wood
e9fd2250eb Make titles smaller in issue template 2018-02-28 22:05:49 +00:00
Nick Craig-Wood
769aa860f2 Rewrite greeting message for issue template inside HTML quoting 2018-02-28 21:58:41 +00:00
Nick Craig-Wood
fdebf9da31 local: Downgrade "invalid cross-device link: trying copy" to debug - Fixes #1875 2018-02-28 21:27:34 +00:00
Nick Craig-Wood
77f344a69d pacer: attempt to fix occasional "beginSleep didn't fire" test failures 2018-02-27 11:06:59 +00:00
Nick Craig-Wood
62540b4007 docs: clarify beta docs and add link to tip.rclone.org 2018-02-27 10:58:48 +00:00
Fabian Möller
21faac6e6c Add David0rk to contributors 2018-02-27 10:06:56 +01:00
Fabian Möller
167a4396c7 drive: remove debug binary 2018-02-27 09:59:06 +01:00
David0rk
1585aa61c1 docs: update install.sh shebang (#2097)
change shebang to bash to avoid syntax errors
2018-02-27 09:32:01 +01:00
Nick Craig-Wood
b91bd32489 vfs: Fix TestWriteFileDoubleClose with --vfs-cache-mode >= writes
This was causing the file to be closed on Flush() instead of Release()
when the file was opened with O_TRUNC.
2018-02-26 21:26:32 +00:00
Nick Craig-Wood
c3d0f68923 vfs: fix truncation work-around on Windows
This no longer needs to deal with O_RDONLY and O_TRUNC since we
disallow this earlier.  This also fixes the code to just do it for
O_APPEND, not for everything.
2018-02-26 19:46:38 +00:00
Nick Craig-Wood
f57e92b9a5 vfs: fix creation of files when truncating #2083
As spotted by @B4dM4n
2018-02-26 19:37:58 +00:00
Nick Craig-Wood
baf9ee5cf7 vfs: update cached copy if we know it has changed before using it
Before this change we would have to wait for the --vfs-cache-max-age
to expire before getting an update.
2018-02-26 18:00:51 +00:00
Nick Craig-Wood
354f1ad722 vfs: Use operations.Copy instead of CopyFile for efficiency 2018-02-26 17:54:18 +00:00
Nick Craig-Wood
54deb01f00 vfs: Make OpenFile and friends return EINVAL if O_RDONLY and O_TRUNC
Before this change Open("name", os.O_RDONLY|os.O_TRUNC) would have
truncated the file.  This is what Linux does, but is counterintuitive.
POSIX states this is undefined, so return an error in this case
instead.  This preserves the invariant O_RDONLY => file is not
changed.
2018-02-26 17:04:27 +00:00
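A minimal sketch of the invariant this commit enforces: reject the undefined O_RDONLY|O_TRUNC combination up front so a read-only open can never change the file.

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

// checkOpenFlags returns EINVAL for O_RDONLY|O_TRUNC, preserving the
// invariant that a read-only open never modifies the file.
func checkOpenFlags(flags int) error {
	// os.O_RDONLY is 0, so test for the absence of write intent.
	if flags&(os.O_WRONLY|os.O_RDWR) == 0 && flags&os.O_TRUNC != 0 {
		return syscall.EINVAL
	}
	return nil
}

func main() {
	fmt.Println(checkOpenFlags(os.O_RDONLY | os.O_TRUNC)) // invalid argument
	fmt.Println(checkOpenFlags(os.O_RDWR | os.O_TRUNC))   // <nil>
}
```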
Nick Craig-Wood
3282fd26af vfs: clean path names before using them in the cache
This avoids inconsistent cache behaviour on open("potato/")
close("potato").

The tests were also adjusted to make them more comprehensive.
2018-02-26 16:59:14 +00:00
Nick Craig-Wood
88d830c7b7 vfs: create cache.opens and use it in place of cache.get to avoid potential race 2018-02-26 16:58:02 +00:00
Nick Craig-Wood
724120d2f3 local: make DirMove return fs.ErrorCantDirMove to allow fallback
Before this change `rclone move localdir /mnt/different-fs` would
error.  Now it falls back to moving individual files, which in turn
falls back to copying individual files across the filesystem boundary.
2018-02-26 12:55:05 +00:00
Nick Craig-Wood
25bbc5d22b drive: make --drive-auth-owner-only look in all directories
Previously it was ignoring directories which weren't owned by the user
which meant it was ignoring files owned by the user in those
directories.
2018-02-26 12:30:59 +00:00
Fabian Möller
00adf40f9f cryptdecode: use Cipher instead of NewFs (#2087)
* crypt: extract NewCipher out of NewFs
* cryptdecode: make use of crypt.NewCipher

Fixes #2075
2018-02-25 12:57:14 +01:00
Fabian Möller
aeefa34f62 fstests: add TestInternal (#2085)
TestInternal allows performing a custom test on the backend using the
optional InternalTester interface.
2018-02-25 10:58:06 +01:00
Nick Craig-Wood
9252224d82 vfs: don't open the file when using a RW handle for a null Seek
Background: cmd/mount/file.go Open() function does a Seek(0, 1) to see
if the file handle is seekable to set a FUSE hint.  Before this change
the file was downloaded before it needed to be which was inefficient
(and broke beta.rclone.org because HEAD requests caused downloads!).
2018-02-22 17:28:21 +00:00
Nick Craig-Wood
1383df4f58 b2: add more logging on multipart upload errors to debug #2036 2018-02-21 09:05:59 +00:00
Nick Craig-Wood
0ce81f68fe Make a beta release for all branches on the main repo (but not pull requests) 2018-02-20 16:06:39 +00:00
Nick Craig-Wood
20ca7d0e4f build: update to using go1.10 as the default go version
Note we have to put the version number in quotes to work around
https://github.com/travis-ci/gimme/issues/132
2018-02-20 13:41:16 +00:00
Nick Craig-Wood
4c3d42bcbb Add Daniel Loader to contributors 2018-02-20 13:04:14 +00:00
Nick Craig-Wood
2ef8de0843 Add Mateusz to contributors 2018-02-20 13:04:14 +00:00
Daniel Loader
a70200dd29 Add version output at end of the install.sh script 2018-02-20 13:03:50 +00:00
Nick Craig-Wood
c99412d11e cryptcheck: make reading of nonce more efficient with RangeOption #1825
...also only calculate the required hash which will speed things up slightly.
2018-02-19 18:00:39 +00:00
Nick Craig-Wood
abc736df1d cat: Use RangeOption for limited fetches to make more efficient #1825 2018-02-19 18:00:39 +00:00
Nick Craig-Wood
ab0d06eb16 fs: Make RangeOption mandatory #1825 2018-02-19 18:00:39 +00:00
Nick Craig-Wood
9ffc3898b1 fstests: Allow RangeOption tests to run everywhere #1825 2018-02-19 18:00:39 +00:00
Mateusz
afc963ed92 config: retry saving the config after failure - fixes #2060 2018-02-19 17:59:27 +00:00
Nick Craig-Wood
c929de9dc4 crypt: Implement RangeOption #1825 2018-02-19 15:45:24 +00:00
Fabian Möller
451cd6d971 fs: add ChunkedReader 2018-02-19 15:45:24 +00:00
Fabian Möller
a647c54888 fs: add RangeSeeker interface 2018-02-19 15:45:24 +00:00
Nick Craig-Wood
334bf49d30 httplib: add Close() method to shut the server down and use it in tests 2018-02-19 15:45:24 +00:00
Nick Craig-Wood
d8f78a7266 serve http/webdav: update docs on SSL 2018-02-19 14:08:17 +00:00
Fabian Möller
62e72801be vfs: fix race between multiple RWFileHandle (#2052)
Fixes #2034
2018-02-18 14:12:26 +01:00
Nick Craig-Wood
358c1fbac9 serve http/webdav: support SSL/TLS 2018-02-16 18:28:10 +00:00
Nick Craig-Wood
cc9d7156e4 serve http/webdav: add --user --pass authentication #1802 2018-02-16 18:28:10 +00:00
Nick Craig-Wood
221a8a9c5d serve http/webdav: add --htpasswd option for authentication #1802 2018-02-16 18:28:10 +00:00
Nick Craig-Wood
2b6f7028a6 vendor: github.com/abbot/go-http-auth for #1802 2018-02-16 18:28:09 +00:00
Nick Craig-Wood
5530662ccc serve http/webdav: factor common http server creation to httplib 2018-02-16 17:48:20 +00:00
Nick Craig-Wood
442334ba61 vfs: disable cache cleaner if --vfs-cache-poll-interval=0
And use this to disable the cleaner in the cache tests to make them
more reliable
2018-02-16 14:12:46 +00:00
Nick Craig-Wood
70b4842823 Add Victor to contributors 2018-02-16 13:21:50 +00:00
Victor
2f63a9f81c onedrive: Overwrite object size value with real size when reading file.
Because of a bug in the OneDrive API it will sometimes report the wrong
size. If the size is wrong, other remotes that depend on the size might
fail. To fix this we overwrite the object's size with the real size
from the ContentLength header.
2018-02-16 13:21:26 +00:00
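A minimal sketch of the work-around (the helper is hypothetical, not the actual backend code): prefer the Content-Length actually served over the size the listing reported.

```go
package main

import (
	"fmt"
	"net/http"
)

// realSize returns the size to use for a downloaded object, trusting
// the Content-Length actually served over the size the directory
// listing claimed (which the API sometimes gets wrong).
func realSize(listedSize int64, resp *http.Response) int64 {
	if resp.ContentLength >= 0 && resp.ContentLength != listedSize {
		return resp.ContentLength
	}
	return listedSize
}

func main() {
	resp := &http.Response{ContentLength: 1234}
	fmt.Println(realSize(1000, resp)) // 1234
}
```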
Nick Craig-Wood
8a9ed57951 vfs: fix another race in cache tests 2018-02-16 12:05:59 +00:00
Nick Craig-Wood
a5c3bcc9c7 fshttp: fix idle timeouts for HTTP connections #2057
Now we only nudge on the idle timeout after a successful Read or Write
which returns some bytes and no errors.
2018-02-16 10:35:41 +00:00
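An illustrative sketch of the nudge-on-success behaviour (not the actual fshttp code): the idle deadline is only pushed forward after a Read or Write that moved some bytes without error.

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// timeoutConn wraps a net.Conn and extends the idle deadline only
// after I/O that actually succeeded.
type timeoutConn struct {
	net.Conn
	idle time.Duration
}

func (c *timeoutConn) nudge() {
	_ = c.Conn.SetDeadline(time.Now().Add(c.idle))
}

func (c *timeoutConn) Read(b []byte) (n int, err error) {
	n, err = c.Conn.Read(b)
	if n > 0 && err == nil { // bytes moved, no error: connection is live
		c.nudge()
	}
	return n, err
}

func (c *timeoutConn) Write(b []byte) (n int, err error) {
	n, err = c.Conn.Write(b)
	if n > 0 && err == nil {
		c.nudge()
	}
	return n, err
}

func main() {
	a, b := net.Pipe()
	defer a.Close()
	defer b.Close()
	c := &timeoutConn{Conn: a, idle: 5 * time.Second}
	go func() { _, _ = b.Write([]byte("ping")) }()
	buf := make([]byte, 4)
	n, _ := c.Read(buf)
	fmt.Printf("%s\n", buf[:n]) // ping (deadline nudged 5s forward)
}
```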
Nick Craig-Wood
9b800d7184 vfs: fix race in cache tests 2018-02-15 21:34:37 +00:00
Nick Craig-Wood
b1945d0094 swift: fix refresh of authentication token
Before this fix we were doing the token refresh but ignoring the new
tokens.

This bug was introduced in v1.39 by 4c0e2f9b3b

Fixes #2018
Fixes #2031
2018-02-15 19:22:45 +00:00
remusb
9a34fd984c cache: fix dirmove with temp fs enabled 2018-02-14 23:47:45 +02:00
Nick Craig-Wood
644313a4b9 http: Fix handling of directories with & in
This was caused by inconsistent escaping of the URL in the prefix
check, so now we check that the URL links back to the correct host and
scheme instead of doing the prefix check.

The decoded path check will catch any URLs which are outside of the
root.
2018-02-14 11:26:37 +00:00
Nick Craig-Wood
675e7c5d8e docs: make downloads into a table
Add the scripted downloads to the download page
2018-02-13 11:23:11 +00:00
Nick Craig-Wood
99f3c8bc93 docs: turn version into a partial so it can be reused more easily 2018-02-13 11:20:23 +00:00
Nick Craig-Wood
ff6a7142da Add Durval Menezes to contributors 2018-02-12 11:47:01 +00:00
Durval Menezes
691c725e8b docs: Enhanced documentation for the --drive-shared-with-me option. 2018-02-12 11:46:29 +00:00
Nick Craig-Wood
ee388c4331 New email address for Oliver Heyme 2018-02-12 11:43:28 +00:00
Nick Craig-Wood
771fbbe314 docs: add documentation for --max-delete 2018-02-12 11:32:59 +00:00
Bjørn Erik Pedersen
ab8c0a81fa Add a delete threshold to sync (--max-delete)
Fixes #959
2018-02-12 11:29:58 +00:00
Nick Craig-Wood
cd7fd51119 vfs: fix docs - fixes #2067 2018-02-12 11:29:32 +00:00
Nick Craig-Wood
0f787e43b0 mount: link the nssm service manager for mount under Windows 2018-02-12 11:29:32 +00:00
Nick Craig-Wood
3a7bb7b2df mount: update docs showing --vfs-cache-mode to work around limitations 2018-02-12 11:29:32 +00:00
remusb
54724a1362 cache: notify vfs when using temp fs - fixes #2051 2018-02-11 22:30:58 +02:00
Stefan Breunig
846bbef1e9 vfs: write 0 bytes when flushing unwritten handles to avoid race conditions in FUSE - fixes #1181 2018-02-11 17:59:13 +00:00
remusb
b33e3f779c cache: add support for polling 2018-02-10 22:01:05 +02:00
Nick Craig-Wood
8a25ca786c drive: add --drive-impersonate for service accounts #1491 2018-02-09 16:58:35 +00:00
Nick Craig-Wood
04a0a7406b vfs: downgrade "poll-interval is not supported" message to Info
...to save confusion as it isn't very important
2018-02-09 07:57:50 +00:00
Oliver Heyme
9a653fea10 crypt: Changed max filename length documentation to 143 2018-02-06 18:26:58 +00:00
Fabian Möller
b183bd7f00 alias: add new backend to create aliases for remote names #1049
The alias backend is a wrapper for an existing remote.
It allows you to name a "remote:path" as an "alias:".
2018-02-06 18:23:47 +00:00
Nick Craig-Wood
5055b340da swift: Fix extra HEAD transaction when uploading a new file - fixes #2053
Also don't keep the swift.Headers as a pointer to a map, just use the map
2018-02-06 14:43:21 +00:00
Nick Craig-Wood
6546b7e0b0 vendor: update github.com/jlaffaye/ftp to fix FTP with online.net 2018-02-05 09:12:30 +00:00
Nick Craig-Wood
f4a5489d19 vendor: dep ensure changes 2018-02-05 09:10:45 +00:00
Nick Craig-Wood
82418c3021 box: improve accounting for chunked uploads 2018-02-02 15:14:41 +00:00
Nick Craig-Wood
bf6101cb6c azureblob: improve accounting for chunked uploads 2018-02-02 15:14:41 +00:00
Nick Craig-Wood
5723d2dbff pcloud: remove unused chunked upload flag and code 2018-02-02 15:14:41 +00:00
Nick Craig-Wood
d0d6b83a7a fs/accounting: rework to enable accounting to work with crypt and b2
This removes the old system of part accounting and replaces it with a
system of popping off the accounting reader and wrapping up new ones
as necessary.

This makes it much easier to carry the context down the chain of
wrapped readers and get the limiting as near as possible to the
output.  This makes the accounting more accurate and the bandwidth
limiting smoother.

Fixes #2029 and Fixes #1443
2018-02-02 15:14:41 +00:00
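A conceptual sketch of the pop-and-rewrap pattern described above (the types are hypothetical stand-ins, not rclone's accounting package): the accounting layer is stripped off before a wrapping reader and re-applied outside it, so counting and bandwidth limiting happen as near to the output as possible.

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// accountedReader counts the bytes that pass through it.
type accountedReader struct {
	in    io.Reader
	bytes *int64 // shared transfer counter
}

func (a *accountedReader) Read(p []byte) (int, error) {
	n, err := a.in.Read(p)
	atomic.AddInt64(a.bytes, int64(n))
	return n, err
}

// unwrap pops the accounting layer off, returning the raw reader and
// the counter so accounting can be re-applied further out.
func unwrap(r io.Reader) (io.Reader, *int64) {
	if a, ok := r.(*accountedReader); ok {
		return a.in, a.bytes
	}
	return r, new(int64)
}

func main() {
	var counted int64
	src := &accountedReader{in: strings.NewReader("secret data"), bytes: &counted}

	// A wrapping layer (crypt, say) wants the raw stream...
	raw, counter := unwrap(src)
	wrapped := io.MultiReader(strings.NewReader("HDR:"), raw) // stand-in for encryption

	// ...and accounting goes back on the outside, nearest the output.
	out := &accountedReader{in: wrapped, bytes: counter}
	n, _ := io.Copy(io.Discard, out)
	fmt.Println(n, atomic.LoadInt64(counter)) // 15 15
}
```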
Nick Craig-Wood
bea02fcf52 fs/accounting: factor into separate files without changing functionality 2018-02-02 15:14:40 +00:00
Nick Craig-Wood
8722403b0d Add nbuchanan to contributors 2018-02-02 14:24:42 +00:00
nbuchanan
9aa8815990 drive: add --drive-use-created-date to use created date as modified date 2018-02-02 14:20:11 +00:00
Nick Craig-Wood
6fb868e00c config: fix --log-level flag after code reorganization - fixes #2043 2018-02-02 14:07:44 +00:00
Nick Craig-Wood
2f746426e7 install.sh: use mv to overwite an existing binary
This stops the install process erroring with "Text file busy" when
trying to `cp` over the binary.
2018-02-02 13:49:37 +00:00
ishuah
4c1ffc7f54 copy/move: detect file size change during copy/move - fixes #1250 2018-02-02 13:49:11 +00:00
Jakub Tasiemski
1018e9bb27 cmd: rewrite touch tests #1934 2018-02-02 13:46:56 +00:00
Nick Craig-Wood
295c3fabec vfs: fill and clean the cache immediately on startup 2018-02-02 12:19:53 +00:00
Nick Craig-Wood
3f8d286a75 vfs: fix cache cleaning on startup
Previous to this fix the vfs cache wasn't being cleaned properly on
startup as the atimes of the existing files were being ignored.
2018-02-02 12:06:42 +00:00
Nick Craig-Wood
fc8641809e fstests: add name of remote to WARN message 2018-02-02 12:05:34 +00:00
Nick Craig-Wood
de35f1c165 Show WARN in integration tests if remote not configured 2018-02-02 09:50:58 +00:00
Nick Craig-Wood
2974efc7d6 Makefile: disable caching in integration tests 2018-02-02 09:37:00 +00:00
Nick Craig-Wood
a6227f34e2 drive: request the export formats only when required #320
If the listing has no google docs in or the user uses
`--drive-skip-gdocs` then we don't fetch the export formats which
saves a transaction to drive.
2018-02-01 12:05:00 +00:00
Fabian Möller
3c7a755631 lsjson: explain the Path value in the docs 2018-01-31 20:06:01 +00:00
Nick Craig-Wood
8df78f2b6d operations: ignore size of objects when they are < 0 #320
This allows google docs to be transferred and checked correctly.
2018-01-31 16:22:05 +00:00
Nick Craig-Wood
44276db454 vfs: make -ve sized files appear as 0 size. #320
This means that Google docs will no longer appear as huge files in
`rclone mount`.  They will not be downloadable, though sometimes
trying twice will work.
2018-01-31 16:22:05 +00:00
Nick Craig-Wood
2eb5cfb7ad fs: Formalize the ObjectUnWrapper interface 2018-01-31 16:21:41 +00:00
remusb
b3d8b7e22e cache: use atexit for cleanup 2018-01-30 22:35:53 +02:00
Nick Craig-Wood
ed2d4ef4a2 travis: revert switch to using the .x version notation for the go minor versions
This doesn't seem to work for the `on` clause in the deploy script so
revert to the previous scheme.

Fixes #2033
2018-01-30 16:28:55 +00:00
Nick Craig-Wood
11fe3fdc16 drive: update docs to clarify access to "Computers" tab #1773 2018-01-30 16:28:55 +00:00
Fabian Möller
cf6d522d2f drive: fix upload to existing file (#2032)
This fixes uploads to existing files on Google Drive, which were broken
by #2007: instead of updating the old file, a new "Untitled" file would
be created in the root folder.
2018-01-30 14:37:06 +01:00
Fabian Möller
29d428040c cache: clean root path (#2023)
Trim "/" from the root path to fix "slice bounds out of range" panic
in cache.go:1272.

Fixes #1945
2018-01-30 14:35:40 +01:00
Fabian Möller
1aa482c333 drive: fix chunked upload (#2030) 2018-01-29 23:36:39 +01:00
remusb
40af98b0b3 cache: offline uploading 2018-01-30 00:05:04 +02:00
Nick Craig-Wood
c277a4096c mount: don't set modtime twice #2021 2018-01-29 20:49:13 +00:00
Nick Craig-Wood
1852a0e0c9 dropbox: Fix custom oauth client parameters - fixes #2028 2018-01-29 20:04:41 +00:00
Nick Craig-Wood
44cedbd9d9 Update MAINTAINERS with our new maintainer Fabian Möller @B4dM4n 2018-01-29 16:35:35 +00:00
Nick Craig-Wood
540e00e938 Merge Fabian Möller's email addresses 2018-01-29 16:33:56 +00:00
Nick Craig-Wood
a4fe2455ed drive: add scope configuration and root folder selection
This allows:

  * appdata access - Fixes #1799
  * access to backup and sync folders - Fixes #1773
  * drives.file access - Fixes #2000
  * read only access - Fixes #337
2018-01-29 14:40:10 +00:00
Fabian Möller
f622017539 drive: use contains for name matching in list
Use contains for name matching in list to work around #1675.
2018-01-29 14:18:49 +00:00
Fabian Möller
07f20dd1fd drive: migrate to api v3 2018-01-29 12:00:02 +00:00
Nick Craig-Wood
fe52502f19 fs: Adjust RangeOption.Decode to return -1 for read to end
A Range request can never request 0 bytes; however, this change was made
to give a clearer signal that the limit means read to the end.

Add a test and more documentation, and fix up uses.
2018-01-27 14:31:29 +00:00
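A self-contained sketch of these Decode semantics (a simplified RangeOption mirroring the description above, not the real code): limit == -1 signals read-to-end, a value no genuine range can produce since a Range request can never ask for 0 bytes.

```go
package main

import "fmt"

// RangeOption mirrors an HTTP Range: Start and End are inclusive byte
// positions, with -1 meaning "unset" on either side.
type RangeOption struct {
	Start, End int64
}

// Decode turns the range into a seek offset and a byte limit for an
// object of the given size. limit < 0 means read to the end.
func (o RangeOption) Decode(size int64) (offset, limit int64) {
	switch {
	case o.Start >= 0 && o.End >= 0: // bytes=Start-End
		return o.Start, o.End - o.Start + 1
	case o.Start >= 0: // bytes=Start-
		return o.Start, -1
	case o.End >= 0: // bytes=-End, ie the last End bytes
		return size - o.End, -1
	default:
		return 0, -1
	}
}

func main() {
	fmt.Println(RangeOption{Start: 0, End: 9}.Decode(100))   // 0 10
	fmt.Println(RangeOption{Start: 10, End: -1}.Decode(100)) // 10 -1
	fmt.Println(RangeOption{Start: -1, End: 20}.Decode(100)) // 80 -1
}
```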
Nick Craig-Wood
9a73688e3a fs: Add ParseRangeOption to parse incoming Range: requests 2018-01-27 13:16:37 +00:00
Nick Craig-Wood
bc3ee977f4 fs/hash: move interface assertion to tests so it doesn't pull in spf13/flag 2018-01-26 14:35:18 +00:00
Nick Craig-Wood
a69fc8b80d travis: run tests on go1.10rc1 2018-01-26 12:16:46 +00:00
Nick Craig-Wood
926cd52a7f Makefile: make full tests run on go1.10+ as well as go1.9 2018-01-26 12:02:44 +00:00
Nick Craig-Wood
c2ce3114f4 Update CONTRIBUTING with more info about integration tests. 2018-01-26 10:00:16 +00:00
Fabian Möller
29286cc8b3 drive: fix single Drive Document as FS root
Allow using Drive Documents as FS root by doing a directory list during NewFS.

Fixes #1772
2018-01-26 09:59:36 +00:00
Fabian Möller
1f5e23aedb scripts: make absolute paths consistent
Change absolute binary paths in scripts to /usr/bin/env or make them
relative. This allows the scripts to be used on Linux distributions
like NixOS, where binaries are not located in /usr/ or /bin/.
2018-01-26 09:39:05 +00:00
Nick Craig-Wood
d016438243 fstest: Fix CheckWithDuplicates after code reshuffle to not use operations 2018-01-25 12:03:39 +00:00
Nick Craig-Wood
fa500e6d21 lib/atexit: factor from cmd so it can be used by backend/cache #1946 2018-01-25 10:33:00 +00:00
Nick Craig-Wood
dbabb18b0c vfs: Make error messages more informative #2009 2018-01-25 10:33:00 +00:00
Nick Craig-Wood
6f6f2aa369 fstest: Fix config file override, hence fixing make quicktest 2018-01-25 10:33:00 +00:00
Fabian Möller
17dabf7a99 ftp: fix RangeOption support in Open #1825 2018-01-25 10:21:00 +00:00
Fabian Möller
9520992a54 sftp: fix RangeOption support in Open #1825 2018-01-25 10:20:43 +00:00
Fabian Möller
a3dd2c691e amazonclouddrive: remove unnecessary notifies from DirChangeNotify
It is unnecessary to notify the node.Parents, because a change event is
generated for all involved files and folders in a move from d1/f1 to
d2/f1. There will be an event for d1, d2 and f1.

Additionally, a duplicate notification is resolved when the empty string
is in pathsToClear.

Related to #2006
2018-01-25 10:19:06 +00:00
Nick Craig-Wood
38f829842a s3: fix server side copy and set modtime on files with + in - fixes #2001
This was broken in 64ea94c1a4 when a
work-around for Digital Ocean was put in.  PathEscape has now been
adjusted so it works with both providers.
2018-01-23 10:50:50 +00:00
Nick Craig-Wood
f9806848fe fstest: use the difficult file name for server side copy #2001
This should detect re-occurrence of #315
2018-01-23 09:37:33 +00:00
Nick Craig-Wood
88e0770f2d cache: Implement RangeOption #1825 2018-01-22 19:44:55 +00:00
Nick Craig-Wood
a6833b68ca local: factor RangeOption code to Decode() method and readers.LimitedReadCloser #1825 2018-01-22 19:44:00 +00:00
Nick Craig-Wood
e44dc2b14d box: fix RangeOption support in Open #1825 2018-01-22 17:05:47 +00:00
Nick Craig-Wood
d876392d15 onedrive: Factor code into fs.FixRangeOption 2018-01-22 17:05:00 +00:00
Nick Craig-Wood
c098e25552 fstest: Skip RangeOption test on Appveyor also 2018-01-22 11:10:29 +00:00
Fabian Möller
186f78d44f local: fix RangeOption support in Open #1825 2018-01-21 19:50:26 +00:00
Nick Craig-Wood
ea69deaa4c fstests: Skip RangeOption test in CI until all implemented 2018-01-21 18:09:16 +00:00
Nick Craig-Wood
c963c74fbe onedrive: fix RangeOption support in Open #1825 2018-01-21 17:11:37 +00:00
Nick Craig-Wood
9c45125271 azureblob: fix RangeOption support in Open #1825 2018-01-21 17:11:32 +00:00
Nick Craig-Wood
8653944a6d Make RangeOption mandatory for Open - #1825
Add an integration test to make sure all backends implement
RangeOption correctly.
2018-01-21 17:09:12 +00:00
Nick Craig-Wood
84bc4dc142 Clarify RangeOption semantics 2018-01-21 09:51:28 +00:00
Nick Craig-Wood
84d00e9046 authors: Fix duplicated entry for Iakov Davydov 2018-01-21 09:38:50 +00:00
Jon Fautley
71bc108ce6 sftp: performance: don't consult config file outside of Fs setup 2018-01-21 09:37:22 +00:00
Stefan Breunig
e57a388851 docs: Update integration testing guide 2018-01-20 18:52:53 +00:00
Nick Craig-Wood
bfa2878d24 Add Andreas Roussos to contributors 2018-01-20 18:50:29 +00:00
Andreas Roussos
dcdb43eb07 Fix typos, reword the description of the lsl command
Add a period at the end of each sentence for consistency.
Change the remaining verbs to their imperative form (again, for consistency).
The default `rclone lsl` output is size, modification time and path, so reword the command description to reflect that.
Correct various typos.
2018-01-20 18:50:20 +00:00
Fabian Möller
115d24e1f7 amazonclouddrive: implement DirChangeNotify
Use the Changes API to invalidate cache entries.
The latest retrieved checkpoint is stored in the config file to allow
fast resumption after restart.
2018-01-20 18:48:52 +00:00
Nick Craig-Wood
62b74d06ff Add Jody Frankowski to contributors 2018-01-20 18:15:27 +00:00
Nick Craig-Wood
7117ba7d58 Add Iakov Davydov to contributors 2018-01-20 18:15:27 +00:00
Jody Frankowski
5e73acd40a Clean up mount.go and vfs/help.go docs
* Title cleanups
* Typos
* `rclone mount vs rclone sync/copy` update with `File Caching`
2018-01-20 18:14:20 +00:00
Nick Craig-Wood
25a41e1945 drive: fix missing error handler 2018-01-20 18:04:23 +00:00
Nick Craig-Wood
ee66419a27 fs/fserrors: Add test for error from #1964 2018-01-19 17:07:40 +00:00
Nick Craig-Wood
8e86a902e2 travis: switch to using the .x version notation for the go minor versions 2018-01-19 14:32:32 +00:00
Nick Craig-Wood
a80d8a21dc vfs: add flags parameter to Dir.Create 2018-01-19 13:18:40 +00:00
Nick Craig-Wood
517bdc719b vfs: make specialized file Open functions private 2018-01-19 11:46:01 +00:00
Nick Craig-Wood
5ad226ab54 fs: Add dir option to fs.Purge #1891
Purge optional interface needs fixing too.
2018-01-19 11:45:50 +00:00
Nick Craig-Wood
a375992186 fstest: Fix removal of test folders/buckets 2018-01-19 10:20:06 +00:00
Nick Craig-Wood
b96c73bee6 test_all: fix -clean flag 2018-01-19 09:47:01 +00:00
Nick Craig-Wood
97c414f025 config/hash: rename more symbols after factoring into own package 2018-01-18 20:27:52 +00:00
Nick Craig-Wood
71722b5b95 config: factor Obscure and Reveal into its own package 2018-01-18 20:19:55 +00:00
Nick Craig-Wood
59a8108fc3 webdav: add a new time format #1952 2018-01-18 16:54:13 +00:00
Nick Craig-Wood
821be5ebed ncdu: add link to asciinema demo of it in action 2018-01-18 14:22:43 +00:00
Nick Craig-Wood
2030dc13b2 lib/oauthutil: fix Google drive oauth process
The problem was introduced by the code refactoring in
11da2a6c9b
2018-01-18 11:18:35 +00:00
Ernest Borowski
5cce74d630 flags: remove --no-traverse flag because it is obsolete - fixes #1813
Signed-off-by: Ernest Borowski <er.borowski@gmail.com>
2018-01-18 11:00:25 +00:00
Iakov Davydov
acd55a8f65 local, fs: --exclude-if-present ignores directories which it doesn't have permission for - fixes #1959 2018-01-16 20:00:16 +00:00
Nick Craig-Wood
ad76dd0adc Add Lucas Bremgartner to contributors 2018-01-16 19:53:59 +00:00
Lucas Bremgartner
8c90bfb0cd FAQ: env vars for SSL root certs and DNS resolver
Added a section to the FAQ about environment variables which allow
controlling the location of the SSL root certificate as well as the
DNS resolver used.

see also comment in #683
2018-01-16 19:53:47 +00:00
Nick Craig-Wood
4b0c5f79b5 qingstor: Only support on go1.7+ 2018-01-16 17:05:26 +00:00
Nick Craig-Wood
1848e26183 dropbox: Only support on go1.7+
See https://github.com/dropbox/dropbox-sdk-go-unofficial/pull/40
2018-01-16 17:05:02 +00:00
Nick Craig-Wood
7d3a17725d vendor: update all dependencies to latest versions 2018-01-16 13:20:59 +00:00
Nick Craig-Wood
8e83fb6fb9 Makefile: Fix integration test runner 2018-01-16 13:14:41 +00:00
Nick Craig-Wood
11da2a6c9b Break the fs package up into smaller parts.
The purpose of this is to make it easier to maintain and eventually to
allow the rclone backends to be re-used in other projects without
having to use the rclone configuration system.

The new code layout is documented in CONTRIBUTING.
2018-01-15 17:51:14 +00:00
Nick Craig-Wood
92624bbbf1 Move all backends into backend directory 2018-01-12 20:27:08 +00:00
Nick Craig-Wood
60afda007b Move dircache, oauthutil, rest and pacer modules into lib 2018-01-12 17:07:38 +00:00
Nick Craig-Wood
b8b620f5c2 Move all backends into backend directory 2018-01-12 17:07:38 +00:00
ishuah
0a7731cf0d cryptdecode: added option to return encrypted file names. Fixes #1923 2018-01-11 19:22:40 +03:00
Will Gunn
6cac98d2ce docs: Add documentation for --stats-file-name-length
Missed adding documentation in original PR https://github.com/ncw/rclone/pull/1951

    Fixes comment on #1206
2018-01-11 13:55:25 +00:00
Nick Craig-Wood
712e6a8085 lsf: fix integration tests 2018-01-11 13:52:15 +00:00
Nick Craig-Wood
6d333da69f Add Will Gunn to contributors 2018-01-10 20:33:57 +00:00
Will Gunn
5c7e8d5a2b fs: Add --stats-file-name-length to specify the printed file name length for stats
Fixes #1206
2018-01-10 20:32:36 +00:00
Jon Fautley
57f1bb7bb2 sftp: add 'set_modtime' hidden configuration option 2018-01-10 20:27:23 +00:00
Filip Bartodziej
5e83dce1f6 Installation script: check for a tool to extract zip archives #1949 2018-01-10 20:18:20 +00:00
Nick Craig-Wood
052c886317 sftp: read $USER in username fallback not $HOME 2018-01-08 21:39:16 +00:00
Nick Craig-Wood
28480c0570 sftp: use correct OS way of reading username - fixes running under crontab 2018-01-07 12:57:46 +00:00
Nick Craig-Wood
72349bdaae Add Jon Fautley to contributors 2018-01-07 11:19:14 +00:00
Jon Fautley
36e6d23112 sftp: Add option to disable remote hash check command execution 2018-01-07 11:18:51 +00:00
Nick Craig-Wood
0eba37d8f3 lsf: add --files-only and --dirs-only flags 2018-01-06 18:04:24 +00:00
Nick Craig-Wood
c74c3b37da lsf: add option to print hashes 2018-01-06 17:53:37 +00:00
Nick Craig-Wood
7c71ee1a5b fs: fix TestListFormat on remotes which return 0 as dir size not -1 2018-01-06 17:47:42 +00:00
Nick Craig-Wood
ed20fa5ee7 ls* commands: update docs and add defaults into options for lsf 2018-01-06 17:00:20 +00:00
Nick Craig-Wood
54a9fdf421 ls2: remove in favour of lsf 2018-01-06 14:41:36 +00:00
Jakub Tasiemski
0d041602cf cmd: new command lsf 2018-01-06 14:39:31 +00:00
Nick Craig-Wood
8f47d7fc06 Add Chris Redekop to contributors 2018-01-06 14:30:27 +00:00
Chris Redekop
4dd1e507f4 s3: set/get the hash for multipart files - #523 2018-01-06 14:30:10 +00:00
Nick Craig-Wood
65618afd8c serve/http: fix serving files with : in - fixes #1939 2018-01-05 17:25:05 +00:00
Nick Craig-Wood
be4ed14525 rest: rename URLEscape to URLPathEscape for consistency with go1.8 2018-01-05 15:55:43 +00:00
Nick Craig-Wood
ef89f1f1a7 webdav: parse time in alternate format for mydrive.ch - fixes #1952 2018-01-05 14:28:06 +00:00
Nick Craig-Wood
b412c745a1 Start v1.39-DEV development 2017-12-23 13:40:28 +00:00
Nick Craig-Wood
f34a9116d4 Version v1.39 2017-12-23 13:07:45 +00:00
Andrew Starr-Bochicchio
64ea94c1a4 s3: Use rest.URLEscape rather than url.QueryEscape.
The X-Amz-Copy-Source takes a path. url.QueryEscape
escapes spaces with a plus sign while rest.URLEscape
(which mimics the url.PathEscape available from go 1.8)
uses '%20'.

This works around an issue when copying objects with
spaces in their key on DigitalOcean Spaces.
2017-12-23 11:27:45 +00:00
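The escaping difference behind this fix is visible with the standard library alone: query escaping turns spaces into '+', which is wrong inside a path component such as X-Amz-Copy-Source, while path escaping uses '%20'.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	key := "my file.txt"
	fmt.Println(url.QueryEscape(key)) // my+file.txt   (query semantics)
	fmt.Println(url.PathEscape(key))  // my%20file.txt (path semantics, go1.8+)
}
```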
remusb
4eac50eb83 cache: update docs for 1.39 2017-12-22 13:52:55 +02:00
Nick Craig-Wood
5683f74025 Add Yassine Imounachen to contributors 2017-12-21 10:33:43 +00:00
Yassine Imounachen
fe71d4fd87 Fix 'QingClound' typo 2017-12-21 10:33:21 +00:00
remusb
a64d92bd35 cache: update internal tests with chunk path 2017-12-20 23:03:44 +02:00
remusb
c5cf0792f2 cache: add the ability to specify a custom chunk path - fixes #1872 2017-12-20 22:43:30 +02:00
Nick Craig-Wood
255d3e925d s3: fix crash if a bad listing is received - fixes #1927
Caringo Swarm is returning a listing with IsTruncated set but no
NextMarker and no Keys.  Rclone doesn't know how to continue the
listing at this point, so it returns an error rather than truncating
the listing or risking a loop.
2017-12-20 16:51:07 +00:00
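A small sketch of the defensive check described (the struct mirrors the relevant S3 listing fields; the surrounding code is illustrative): a truncated listing with nothing to continue from is an error, not an invitation to loop.

```go
package main

import (
	"errors"
	"fmt"
)

// listPage holds the fields of an S3 ListObjects page that matter for
// continuation.
type listPage struct {
	IsTruncated bool
	NextMarker  string
	Keys        []string
}

// nextMarker works out where to continue a listing, erroring if the
// page claims truncation but provides no way to continue.
func nextMarker(p listPage) (string, error) {
	if !p.IsTruncated {
		return "", nil // listing complete
	}
	if p.NextMarker != "" {
		return p.NextMarker, nil
	}
	if n := len(p.Keys); n > 0 {
		return p.Keys[n-1], nil // continue after the last key seen
	}
	return "", errors.New("s3 protocol error: truncated listing with no NextMarker and no Keys")
}

func main() {
	_, err := nextMarker(listPage{IsTruncated: true})
	fmt.Println(err)
}
```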
remusb
0d4bff8239 cache: fix Windows separator issue for #1904 (#1930) 2017-12-20 17:24:50 +02:00
Nick Craig-Wood
4ba58884b1 webdav: decode multiple <s:propstat> more carefully - fixes nextcloud 12.0.4
For some reason nextcloud sends multiple propstat responses now, one
with a 404 status.  rclone was interpreting the last status and
assuming the file was missing.
2017-12-20 11:53:10 +00:00
remusb
8839e4ee33 cache: add SIGHUP support to evict all cache - fixes #1906 2017-12-19 15:48:48 +02:00
remusb
ebbe77f525 cache: enable internal tests and fix race condition for them (#1928) 2017-12-19 15:37:38 +02:00
remusb
6f1ae00c7f cache: disable unreliable internal tests 2017-12-18 16:31:15 +02:00
remusb
6b5989712f cache: refactor entries caching pattern for #1904 (#1924) 2017-12-18 14:55:37 +02:00
Nick Craig-Wood
29d34426bc vfs: fix deletion of in use directories #1860
This was causing errors if the cache cleaner was called between the
Open and the pendingOpen of a RW file.

The fix was to move the cache open to the Open from the openPending.
2017-12-15 15:42:49 +00:00
Nick Craig-Wood
2a01fa9fa0 moveto,copyto: clarify error message if source doesn't exist - fixes #1022 2017-12-15 11:37:31 +00:00
Nick Craig-Wood
4c0e2f9b3b swift: fix crash on bad authentication - fixes #1919
This also fixes Hubic not re-authenticating for long transfers.
2017-12-14 14:23:55 +00:00
Nick Craig-Wood
240c97cd7a Update MAINTAINERS doc 2017-12-14 13:56:58 +00:00
Nick Craig-Wood
2fd0bec4e4 docs: note that script install checks the version 2017-12-14 11:00:22 +00:00
Nick Craig-Wood
7e585cda96 fs: fix TestRmdirsLeaveRoot test 2017-12-14 08:57:28 +00:00
Nick Craig-Wood
1b1593a894 Add lewapm to contributors 2017-12-13 10:24:16 +00:00
lewapm
9c242edc10 rmdirs: add --leave-root flag - fixes #1874 2017-12-13 10:23:54 +00:00
Nick Craig-Wood
0914ec316c b2: fix multipart upload retries #1733
Prior to this fix we were uploading 0 length bodies if a retry was
needed on a multipart upload chunk.  This gave the error `http:
ContentLength=268435496 with Body length 0`.

Fix by remaking the hash appending reader in the Call loop.  This is
inefficient in the face of retries, but these are uncommon.
2017-12-13 10:11:20 +00:00
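An illustrative sketch of the remake-the-reader fix (hypothetical names, not the b2 backend itself): building a fresh body reader on every attempt means a retry can never send an already consumed, 0-length body.

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

// upload stands in for the chunk upload: it always consumes the body
// and fails the first attempt to force a retry.
func upload(body io.Reader, failFirst *bool) error {
	_, _ = io.Copy(io.Discard, body) // body is now exhausted
	if *failFirst {
		*failFirst = false
		return errors.New("transient error")
	}
	return nil
}

func main() {
	chunk := []byte("chunk data")
	failFirst := true
	for try := 1; try <= 3; try++ {
		// Remake the reader inside the retry loop: inefficient when a
		// retry happens, but retries are uncommon.
		body := bytes.NewReader(chunk)
		if err := upload(body, &failFirst); err != nil {
			fmt.Println("retrying after:", err)
			continue
		}
		fmt.Println("uploaded on try", try)
		break
	}
}
```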
Nick Craig-Wood
2cf808c825 ncdu: fix crashes on empty directories - fixes #1910
Up arrow or right arrow in an empty directory would crash ncdu
2017-12-12 13:54:15 +00:00
Nick Craig-Wood
66558213e0 b2: send correct fileName when using --hard-delete - fixes #1905 2017-12-12 07:48:06 +00:00
remusb
84701e376a cache: delay Plex connection to the first read handle - fixes #1903 2017-12-12 00:46:08 +02:00
remusb
829dd1ad25 cache: try a full read on the last chunk for #1896 2017-12-11 01:15:53 +02:00
remusb
7c972d375b cache: fix mismatched types for #1896 2017-12-10 14:16:16 +02:00
remusb
3d2f3d9a7f cache: catch panic and add more logging for #1896 2017-12-10 14:11:31 +02:00
Nick Craig-Wood
845b22a628 Add Jon Fautley to contributors 2017-12-10 10:53:49 +00:00
Jon Fautley
3684585104 sftp: add option to enable the use of aes128-cbc cipher 2017-12-10 10:53:32 +00:00
Filip Bartodziej
f424019380 error codes documented and bugs fixed 2017-12-10 10:16:20 +00:00
Filip Bartodziej
ab03f6e475 version check in curl installation 2017-12-10 10:16:20 +00:00
remusb
b48b537325 cache: plex integration, refactor chunk storage and worker retries (#1899) 2017-12-09 23:54:26 +02:00
ishuah
b05e472d2e stats: condensed transfer output to fit 80x25 terminals 2017-12-09 10:48:36 +03:00
Nick Craig-Wood
5061aaaf46 vendor: update github.com/dropbox/dropbox-sdk-go-unofficial to fix #1806 2017-12-07 22:14:36 +00:00
Nick Craig-Wood
e00616b016 Write version.txt on building into root of downloads 2017-12-07 21:49:32 +00:00
Nick Craig-Wood
09f203f62b Add Filip Bartodziej to contributors 2017-12-07 21:37:09 +00:00
Filip Bartodziej
2965cbe264 curl install for rclone #1856 2017-12-07 21:36:55 +00:00
Nick Craig-Wood
bb3ba7b314 Add Giovanni Pizzi to contributors 2017-12-07 21:31:15 +00:00
Giovanni Pizzi
f12512dd13 swift: Allow authentication with storage url and auth key
Add the option to load the storage url and the auth key from the
environment when you have an alternate authorization, external to
rclone, that you need to use (e.g. because it's not yet supported by
the swift go library).

Also allow getting the alternate authentication from the config file,
and use the proper way (c.Authenticated()) to know if it's
authenticated.

Updated docs as well.
2017-12-07 21:30:58 +00:00
remusb
25b073c767 fs: add Wrap feature for FS to identify their parent FS (#1884) 2017-12-06 17:14:34 +02:00
Nick Craig-Wood
ebd7780188 fstest: don't error out if the target was not found at end of run 2017-12-04 15:58:29 +00:00
Nick Craig-Wood
fa4a25a73b fs: only test one level of cache
Can't test multiple caches at once as we can only have 1 DB open at once
2017-12-04 15:50:59 +00:00
Ernest Borowski
934df67aef filter: warn the user if they use --include and --exclude together - fixes #1764
Signed-off-by: Ernest Borowski <er.borowski@gmail.com>
2017-12-04 14:20:01 +00:00
Nick Craig-Wood
006b296c34 Tidy up Makefile to get rid of vendor directory avoidance workarounds 2017-12-03 13:03:20 +00:00
Nick Craig-Wood
38b85e94ea vfs: rename --cache-* options to --vfs-cache-* to save confusion
...as the backend cache options are all called --cache-* too. Adjust
docs to point out what the vfs cache does vs the backend cache.
2017-12-03 12:14:15 +00:00
Nick Craig-Wood
4b185355df fs: rcat - use in memory object and Copy for more reliable transfers 2017-12-03 12:14:15 +00:00
Nick Craig-Wood
7d15c33e42 fs: make Copy and Move return the destination object if possible 2017-12-03 12:14:15 +00:00
Nick Craig-Wood
11332a19a0 fs: make an in memory object for short transfers 2017-12-03 12:14:15 +00:00
Nick Craig-Wood
a1f8318b29 Add Laurence to contributors 2017-12-03 10:24:53 +00:00
Laurence
e767c9ac9f Fix typo in dbhashsum description 2017-12-03 10:24:33 +00:00
Nick Craig-Wood
56cfb810a8 Add Tim Cooijmans to contributors 2017-12-03 10:22:42 +00:00
Tim Cooijmans
835ca15ec8 drive: add service account support. Fixes #839. 2017-12-03 10:21:41 +00:00
remusb
4af4bbb539 cache: add support for PutStream - fixes #1836 2017-11-30 21:16:45 +02:00
remusb
47450ba326 cache: handle errors when bolt tries to start 2017-11-30 12:27:59 +02:00
Nick Craig-Wood
639e812789 fs: integration tests: add SUMMARY heading for log scraping 2017-11-29 15:55:37 +00:00
Nick Craig-Wood
1c6cad2252 fs: integration tests: add 30 minute timeout per test 2017-11-29 13:51:17 +00:00
Nick Craig-Wood
6d3df6f172 cmount: make tests more reliable on Windows 2017-11-28 20:39:24 +00:00
Nick Craig-Wood
c16ac697a9 vfs: keep track of directories in the cache also #1860
This makes managing empty directories more reliable.
2017-11-28 20:39:23 +00:00
Nick Craig-Wood
0978957a2e vfs: make sure all 96 combinations of flags for Open work 2017-11-28 20:39:23 +00:00
Nick Craig-Wood
d1b19f975d vfs: remove items from cache when deleted #1860
Also fixes the error message produced when items have already been
deleted from the cache (eg when moved) by the time the cache reaper
comes to delete them.
2017-11-28 16:13:58 +00:00
ishuah
aab8051f50 move: add --delete-empty-src-dirs flag - fixes #1854 2017-11-28 11:38:19 +03:00
Nick Craig-Wood
1248beb0b2 cachestats: Fix nil pointer if not a cache remote - fixes #1855
Also don't retry or show stats
2017-11-24 10:22:23 +00:00
Nick Craig-Wood
6448c445f5 acd: Fix download of large files failing - Fixes #1501
Previously it was necessary to work around large files failing to
download with `--acd-templink-threshold`.  This change makes that flag
obsolete and all files should download.  The flag isn't being removed
though, as templinks may still be useful under some circumstances.

It does this by filtering `Authorization:` headers out in the
transport if the authorization is supplied in the URL.  This prevents
the "Only one auth mechanism allowed; only the X-Amz-Algorithm query
parameter, Signature query string parameter or the Authorization
header should be specified" error from AWS.
2017-11-24 09:14:25 +00:00
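A hedged sketch of the filtering idea (in the real code this happens inside the transport; the helper below is illustrative): when the URL already carries query-string auth, drop the Authorization header so AWS-style endpoints never see both mechanisms.

```go
package main

import (
	"fmt"
	"net/http"
)

// filterAuth drops the Authorization header when the URL already
// carries its authorisation in the query string.
func filterAuth(req *http.Request) *http.Request {
	if req.URL.Query().Get("X-Amz-Algorithm") != "" {
		req = req.Clone(req.Context()) // don't mutate the caller's request
		req.Header.Del("Authorization")
	}
	return req
}

func main() {
	req, _ := http.NewRequest("GET",
		"https://example.com/file?X-Amz-Algorithm=AWS4-HMAC-SHA256", nil)
	req.Header.Set("Authorization", "Bearer token")
	fmt.Printf("%q\n", filterAuth(req).Header.Get("Authorization")) // ""
}
```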
Nick Craig-Wood
fdb01437d8 fs: Allow the http Transport to have an optional filter request function 2017-11-24 09:07:56 +00:00
Nick Craig-Wood
729e1305b7 oauthutil: Allow the http.Client to be passed in 2017-11-24 09:07:03 +00:00
Nick Craig-Wood
02ffd43572 fs: Save the config before asking for a token - fixes #1220
Before this if the client_id/client_secret was edited it would
disappear when asking for the new token.

This means the post-config step is done after the user has confirmed
the config is OK, which can't be helped.
2017-11-23 14:01:32 +00:00
Nick Craig-Wood
e53892f53b fs,drive,dropbox: Make and use new RepeatableReader variants to lower memory use
RepeatableReaderSized has a pre-allocated buffer which should help
with memory usage - before it grew the buffer.  Since we know the size
of the chunks, pre-allocating it should be much more efficient.

RepeatableReaderBuffer uses the buffer passed in.

RepeatableLimit* are convenience functions for wrapping a reader in
an io.LimitReader and then a RepeatableReader with the same buffer
size.
2017-11-23 13:53:46 +00:00
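A simplified sketch of the pre-allocated variant described above (not the real fs implementation): everything read is kept in a buffer of the known chunk size, so the stream can be rewound and replayed for a retry without the buffer ever growing.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// repeatableReader buffers what it reads from src in a buffer
// pre-allocated to the known chunk size.
type repeatableReader struct {
	src io.Reader
	buf []byte // pre-allocated, len == chunk size
	pos int    // current read position in buf
	n   int    // how much of buf has been filled from src
}

func newRepeatableReaderSized(src io.Reader, size int) *repeatableReader {
	return &repeatableReader{src: src, buf: make([]byte, size)}
}

func (r *repeatableReader) Read(p []byte) (int, error) {
	if r.pos < r.n { // replay data we already buffered
		c := copy(p, r.buf[r.pos:r.n])
		r.pos += c
		return c, nil
	}
	if r.n == len(r.buf) { // buffer full: the whole chunk has been read
		return 0, io.EOF
	}
	limit := r.n + len(p)
	if limit > len(r.buf) {
		limit = len(r.buf)
	}
	c, err := r.src.Read(r.buf[r.n:limit])
	copy(p, r.buf[r.n:r.n+c])
	r.n += c
	r.pos = r.n
	return c, err
}

// Rewind seeks back to the start so the chunk can be sent again.
func (r *repeatableReader) Rewind() { r.pos = 0 }

func main() {
	rr := newRepeatableReaderSized(strings.NewReader("hello"), 5)
	b, _ := io.ReadAll(rr)
	fmt.Printf("%s ", b) // hello
	rr.Rewind()
	b, _ = io.ReadAll(rr)
	fmt.Printf("%s\n", b) // hello, replayed from the buffer
}
```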
ishuah
6c62fced60 move: fixed root source directories getting deleted after move - fixes #1849 2017-11-23 12:01:35 +03:00
Nick Craig-Wood
c64ad851af Add David Minor to contributors 2017-11-23 08:57:34 +00:00
David Minor
4c116af1d0 s3: add support for ECS task IAM roles
ECS container IAM metadata is in a different place than EC2 IAM metadata.
Use defaults' RemoteCredProvider function to query the standard locations
for the credentials.

Give the ECS role precedence over the role available from the underlying
EC2 instance.
2017-11-23 08:56:56 +00:00
Nick Craig-Wood
8357a82eee dropbox: change default chunk size to 48MB now we are buffering them in memory 2017-11-22 17:15:37 +00:00
Nick Craig-Wood
483f4b8ad9 dropbox: multipart uploads retry every error after the first chunk is done 2017-11-22 17:15:37 +00:00
Nick Craig-Wood
6f61da5c75 dropbox: buffer the chunks when uploading large files so they can be retried
We use fs.RepeatableReader to buffer the chunks, which plays nicely with
the accounting.  The default chunk size is 128M which may be too
large.

Fixes #1806
2017-11-22 17:15:37 +00:00
Nick Craig-Wood
159fce0106 fs: fix --cache-dir to have some effect 2017-11-22 17:05:02 +00:00
remusb
569c1a2ec1 cache: catch signal interrupt for bolt handle cleanup 2017-11-22 18:32:36 +02:00
remusb
2497ca5134 cache: add extra logging in Move and Copy 2017-11-22 00:38:25 +02:00
Nick Craig-Wood
cbe5d7ce64 fs: Remove X-Auth-Token: from headers when dumping for swift 2017-11-21 17:32:07 +00:00
Nick Craig-Wood
1a65a4e769 fs: Add --dump flag, introduce --dump requests, responses and remove --dump-auth, --dump-filters
What was --dump-flag is now written as --dump flag. This is a comma separated list which can contain

  * headers - HTTP headers as before
  * bodies  - HTTP bodies as before
  * requests - HTTP request bodies
  * responses - HTTP response bodies
  * auth - HTTP auth
  * filters - Filter regexps

Leave --dump-headers and --dump-bodies for the time being but remove
the other --dump-* flags as they aren't used very often.
2017-11-21 17:32:07 +00:00
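A hedged sketch of parsing such a comma separated --dump value into a set of bit flags (the names mirror the list above; the parser itself is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

type DumpFlags int

const (
	DumpHeaders DumpFlags = 1 << iota
	DumpBodies
	DumpRequests
	DumpResponses
	DumpAuth
	DumpFilters
)

var dumpNames = map[string]DumpFlags{
	"headers":   DumpHeaders,
	"bodies":    DumpBodies,
	"requests":  DumpRequests,
	"responses": DumpResponses,
	"auth":      DumpAuth,
	"filters":   DumpFilters,
}

// parseDump turns "headers,auth" into DumpHeaders|DumpAuth.
func parseDump(s string) (DumpFlags, error) {
	var flags DumpFlags
	for _, name := range strings.Split(s, ",") {
		f, ok := dumpNames[strings.TrimSpace(name)]
		if !ok {
			return 0, fmt.Errorf("unknown dump flag %q", name)
		}
		flags |= f
	}
	return flags, nil
}

func main() {
	f, err := parseDump("headers,auth")
	fmt.Println(f&DumpHeaders != 0, f&DumpAuth != 0, err) // true true <nil>
}
```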
Nick Craig-Wood
bcf1ece43b Update MAINTAINERS with our new maintainer Remus Bunduc 2017-11-21 17:32:07 +00:00
ishuah
b4aa920a3d stats: show the amount of data transferred in kb/mb - fixes #1167 2017-11-21 12:40:02 +03:00
Remus Bunduc
41a97e39c8 cache: fix option help text 2017-11-21 11:25:28 +02:00
Nick Craig-Wood
abbcb2f5e0 cache: disable another unreliable test #1844 2017-11-20 21:25:38 +00:00
Nick Craig-Wood
cb6de4a2cf cache: disable unreliable test #1844 2017-11-20 19:55:00 +00:00
Nick Craig-Wood
dc1c679c65 mount: support truncate properly 2017-11-20 19:42:35 +00:00
Nick Craig-Wood
3fb4fe31d2 vfs: make sure write only handles never truncate files they shouldn't 2017-11-20 19:42:25 +00:00
Nick Craig-Wood
76b151984c vfs: cache the size of the object in the read handle 2017-11-20 17:57:13 +00:00
Nick Craig-Wood
f0ed384786 cache: fix default setting for warmup_age 2017-11-20 14:39:12 +00:00
Nick Craig-Wood
f80f7a0509 cache: use fs.CacheDir to make the default directory for the cache
NB this changes the default dir for the cache
2017-11-20 14:38:28 +00:00
Nick Craig-Wood
af50f31f7d mounttest: wait for Release after every Read to stop using in use files under Windows 2017-11-20 12:46:24 +00:00
Nick Craig-Wood
8e2213fbbd local: add error message for cross file system moves 2017-11-20 12:46:24 +00:00
Nick Craig-Wood
085c690798 build: add in 64bit path for WinFSP headers 2017-11-20 12:46:24 +00:00
Nick Craig-Wood
2b666187a6 cmount: disable tests on windows + race detector
These either hang or produce incorrect results for reasons I haven't
worked out yet.
2017-11-20 12:46:24 +00:00
Nick Craig-Wood
00b46a8b96 mounttest: wait for files to disappear from the directory listing 2017-11-20 12:46:24 +00:00
Nick Craig-Wood
b21f227bd3 mounttest: fix crash when FUSE not present 2017-11-20 12:46:24 +00:00
Nick Craig-Wood
e98e550021 mounttest: wait for all background Close/Release after writing a file
The filesystem does a certain amount of things asynchronously, so
waiting for the file to be released after writing means everything
should be in a consistent state.
2017-11-20 12:46:23 +00:00
Nick Craig-Wood
60945d0a37 vfs: remove misleading comment 2017-11-20 12:46:23 +00:00
Nick Craig-Wood
b4083b4371 vfs: rename Fsync to Sync and implement Sync on Node and Handle 2017-11-20 12:46:23 +00:00
Nick Craig-Wood
eb3415db50 cmount: enable more tests for Windows 2017-11-20 12:46:23 +00:00
Nick Craig-Wood
9fbd8a6419 mounttest: fixes for running under Windows
* don't mount and unmount between cache runs - WinFSP doesn't support it
  * use OS paths for opening things
2017-11-20 12:46:23 +00:00
Nick Craig-Wood
9738f8532b vfs: Add FlushDirCache method 2017-11-20 12:46:23 +00:00
Nick Craig-Wood
a5b034a992 vfs: add WaitForWriters to wait until all writers have finished 2017-11-20 12:46:23 +00:00
Nick Craig-Wood
321b6da7af vfs: don't remove file from writers until it is transferred
This means that the list of active writers is up to date
2017-11-20 12:46:23 +00:00
Nick Craig-Wood
1b22ee5b93 vfs: fix error handling in openPending so it returns the correct error 2017-11-20 12:46:23 +00:00
Nick Craig-Wood
eab55ce882 vfs: add open files to directories 2017-11-20 12:46:23 +00:00
Nick Craig-Wood
61b6159a05 mount, cmount: add O_CREATE to Open calls since fuse doesn't seem to supply it 2017-11-20 12:46:22 +00:00
Nick Craig-Wood
c560017934 vfs: add Path method to Node and use it to stop reading nil DirEntry
All DirEntry calls now have been checked for nil or converted to use Path.
2017-11-20 12:46:22 +00:00
Nick Craig-Wood
7c3584f4e6 mountlib: wait for mountpoint to disappear under Windows 2017-11-20 12:46:22 +00:00
Nick Craig-Wood
981cfb1bec mounttest: retry directory listings to account for slow updates on Windows 2017-11-20 12:46:22 +00:00
Nick Craig-Wood
992647b157 vfs: Don't error a r/w file open without cache; delay error until Read called
If we open a file for r/w without the cache we now always return a
handle and return an error if the file is ever read from.  This fixes
incompatibility with cmount under windows.
2017-11-20 12:46:22 +00:00
Nick Craig-Wood
dec21ccf63 vfs, cmount: make truncate work properly in the presence or otherwise of open files 2017-11-20 12:46:22 +00:00
Nick Craig-Wood
94adf4f43b cmount: translate FUSE open flags into OS flags
On Windows the fuse.O_* flags do not have the same values as the
os.O_* flags so translate between the two representations.  They are
mostly the same which is why this hasn't caused a problem before.
2017-11-20 12:46:22 +00:00
Nick Craig-Wood
e7f2935333 vfs: decode flags in Open/OpenFile for debug 2017-11-20 12:46:22 +00:00
Nick Craig-Wood
f5f8c0c438 cmount: make Truncate call the correct Handle or Node method 2017-11-20 12:46:22 +00:00
Nick Craig-Wood
60cdcf784c cmount: use -o atomic_o_trunc to make sure O_TRUNC is supplied to Open() 2017-11-20 12:46:22 +00:00
Nick Craig-Wood
57a5c67729 mounttest: run the tests for all 4 VFS cache modes 2017-11-20 12:46:21 +00:00
Nick Craig-Wood
d7908c06c9 mountlib: ensure we don't open files with read and write intent 2017-11-20 12:46:21 +00:00
Nick Craig-Wood
8951875c21 vfs,mount,cmount,mountlib: allow flags to be overriden by environment variables 2017-11-20 12:46:21 +00:00
Nick Craig-Wood
05a1e1532b vfs,mount,cmount,serve: Add documentation for vfs caching modes 2017-11-20 12:46:21 +00:00
Nick Craig-Wood
7f20e1d7f3 vfs: add read write files and caching #711
This adds new flags to mount, cmount, serve *

    --cache-max-age duration         Max age of objects in the cache. (default 1h0m0s)
    --cache-mode string              Cache mode off|minimal|writes|full (default "off")
    --cache-poll-interval duration   Interval to poll the cache for stale objects. (default 1m0s)
2017-11-20 12:36:50 +00:00
Nick Craig-Wood
bb0ce0cb5f vendor: vfs add vendor/github.com/djherbis/times 2017-11-20 12:36:50 +00:00
Nick Craig-Wood
e946a8eab0 fs: Add CacheDir config variable 2017-11-20 12:00:32 +00:00
Nick Craig-Wood
a0cfa0929b vfs: remove un-needed (after introduction of rcat) createInfo struct 2017-11-20 12:00:32 +00:00
Nick Craig-Wood
3fb1e96988 vfs: factor Open logic from Dir.Create into vfs.OpenFile 2017-11-20 12:00:32 +00:00
Nick Craig-Wood
46947b3b9b rcat: fix goroutine leak
This was leaking goroutines in the short file case because it wasn't
calling Close() on the Account object.  This became apparent when
testing with mount.
2017-11-20 12:00:32 +00:00
Nick Craig-Wood
de98e2480d Add Jakub Tasiemski to contributors 2017-11-20 11:16:22 +00:00
Jakub Tasiemski
3cf7c61aa0 Add touch command - fixes #1594 2017-11-20 11:16:05 +00:00
Fabian Möller
d8b3bf014d mount: use sdnotify to signal systemd the mount is ready
When the NOTIFY_SOCKET environment variable is set, notify systemd after
the mount is ready.
2017-11-20 11:03:10 +00:00
Fabian Möller
0bfa29cbcf vendor: add github.com/okzk/sdnotify 2017-11-20 11:03:10 +00:00
Nick Craig-Wood
6cc968b085 Add Fabian Möller to contributors 2017-11-19 22:14:33 +00:00
Fabian Möller
ce5b3a531d crypt: implement DirChangeNotify
crypt now implements DirChangeNotify if the wrapped FS provides it.
2017-11-19 20:09:52 +00:00
Fabian Möller
5acb6f47e7 mountlib: log when poll-interval is ineffective
Notify the user in case poll-interval is used on an unsupported remote
2017-11-19 20:08:14 +00:00
Nick Craig-Wood
409ba56fde Add Iakov Davydov to contributors 2017-11-17 21:52:00 +00:00
Nick Craig-Wood
5d875e8840 Add Remus Bunduc to contributors 2017-11-17 21:52:00 +00:00
Iakov Davydov
429bb7e8b8 docs for --exclude-if-present 2017-11-17 21:51:11 +00:00
Iakov Davydov
7d3abdc463 tests for --exclude-if-present 2017-11-17 21:51:11 +00:00
Iakov Davydov
538246f6c3 support exclude file in --fast-list mode 2017-11-17 21:51:11 +00:00
Iakov Davydov
557dd8f031 ListDirSorted check for excludefile 2017-11-17 21:51:11 +00:00
Iakov Davydov
37aaa19f3a new option: --exclude-if-present 2017-11-17 21:51:11 +00:00
Iakov Davydov
cef2e3bf83 path -> startPath in walkRDirTree (we need the path package) 2017-11-17 21:51:11 +00:00
Iakov Davydov
a3a436ce16 WalkRDirTree: return error if unknown item type 2017-11-17 21:51:11 +00:00
Iakov Davydov
5d05df3124 ListContainsExcludeFile: checks for exclude file in the list 2017-11-17 21:51:11 +00:00
Iakov Davydov
421ba84e12 DirTree.Prune: deletes several directories 2017-11-17 21:51:11 +00:00
Iakov Davydov
7ae7080824 FileExists check if a file exists 2017-11-17 21:51:11 +00:00
ishuah
31d2fb4e11 mount: Fix mount breaking on Windows - fixes #1827 2017-11-16 15:20:53 +03:00
Nick Craig-Wood
704e82aab1 dropbox: adapt to upstream changes #1804 2017-11-15 16:02:29 +00:00
Nick Craig-Wood
fc352c1ff6 vendor: update github.com/dropbox/dropbox-sdk-go-unofficial to fix #1804 2017-11-15 15:55:01 +00:00
Nick Craig-Wood
e491093cd1 vendor: dep ensure to get things into sync after merges 2017-11-15 15:52:44 +00:00
Remus Bunduc
016abf825e cache: first version 2017-11-15 15:23:21 +00:00
Remus Bunduc
0c942199c9 cache: add vendor requirements: bbolt and go-cache 2017-11-15 15:23:21 +00:00
ishuah
aec2265be0 rclone: implement exit codes - #1136 2017-11-15 17:48:37 +03:00
Substantiel
2423fa40e2 config: add password sub command for setting obscured passwords 2017-11-15 14:44:45 +00:00
Nick Craig-Wood
4355f3fe97 Add Ernest Borowski to contributors 2017-11-14 21:25:02 +00:00
Ernest Borowski
9fbff7bcab mountlib: check if directory is not empty before mounting - fixes #1386
Signed-off-by: Ernest Borowski <er.borowski@gmail.com>
2017-11-14 21:24:31 +00:00
Substantiel
413faa99cf oauthutil: make sure auth server always finishes even when things go wrong 2017-11-09 21:34:44 +00:00
ishuah
ed91d6b5a5 Added Ishuah Kariuki to MAINTAINERS.md 2017-11-09 17:10:32 +03:00
ishuah
c65734ee69 move: delete source directory after successful move - fixes #1642 2017-11-07 22:21:38 +00:00
Nick Craig-Wood
8c8abfd6dc vendor: update github.com/a8m/tree - fixes #1797 2017-11-06 11:23:27 +00:00
ishuah
dfaee55ef3 crypt: Added option to encrypt directory names or leave them intact - #1240 2017-11-06 10:38:48 +00:00
Nick Craig-Wood
72072d7d6b Add Pierre Carlson to contributors 2017-11-05 22:09:31 +00:00
Pierre Carlson
f1287e13f7 Add new fields for swift configuration to support IBM Bluemix Swift 2017-11-05 22:08:43 +00:00
Substantiel
7749157596 Add --auto-confirm flag 2017-11-05 21:56:50 +00:00
Oliver Heyme
682b4d54c5 onedrive: Add option to choose resourceURL during setup of OneDrive Business account if more than one is available for the user 2017-11-05 21:41:56 +00:00
Nick Craig-Wood
245edd1b0e local: fix equality check for times 2017-11-05 21:39:49 +00:00
Nick Craig-Wood
4d081ec87e Add Corban Raun to contributors 2017-11-05 21:39:49 +00:00
Corban Raun
a8dfc5ce3b Fix spelling in some documentation 2017-11-05 21:38:59 +00:00
Nick Craig-Wood
68d0b5adbb serve webdav: this implements a webdav server for any rclone remote. 2017-11-04 10:24:11 +00:00
Nick Craig-Wood
c4ad3ac94c vendor: ensure golang.org/x/net/webdav is vendored 2017-11-04 10:24:11 +00:00
Nick Craig-Wood
16e16bc220 serve http: use vfs to cache the directories and support Range header 2017-11-04 10:24:11 +00:00
Nick Craig-Wood
73dfa21ba3 local: avoid triggering the race detector 2017-11-04 10:24:11 +00:00
Nick Craig-Wood
c31556c6d1 vfs: Make sure all public methods are locked in Read and Write Handle 2017-11-04 10:24:10 +00:00
Nick Craig-Wood
2083ac6e2a vfs: add ECLOSED and tidy errors 2017-11-04 10:24:10 +00:00
Nick Craig-Wood
22ee839d05 cmount,vfs: unify Read and Write handles and File and Dir where possible 2017-11-04 10:24:10 +00:00
Nick Craig-Wood
5634659ea3 mount,vfs: unify Read and Write handles in preparation for ReadWrite handles 2017-11-04 10:24:10 +00:00
Nick Craig-Wood
e18122e88b vfs: add tests and subsequent fixes
* Tests for VFS layer
  * Small fixes found during testing
  * Fix Close, Flush and Release behaviour for ReadFileHandle and WriteFileHandle
  * Fix nil object bugs on File
2017-11-04 10:24:10 +00:00
Nick Craig-Wood
07ec8073fe mount: remove unused DirEntry struct 2017-11-03 13:00:00 +00:00
Nick Craig-Wood
8184ec4b70 vfs: add EPERM to errors 2017-11-03 13:00:00 +00:00
Nick Craig-Wood
190367d917 vfs: factor duplicated Open code into vfs from mount/cmount 2017-11-03 13:00:00 +00:00
Nick Craig-Wood
a5dc62f6c1 vfs: Make file handles compatible with OS
* Implement directory handles
  * Unify OpenFile
  * Add all the methods to match *os.File
  * Add StatParent and Rename methods to VFS
2017-11-03 13:00:00 +00:00
Nick Craig-Wood
3e0c91ba4b vfs: Move DefaultOpt to vfs and make some methods private 2017-11-03 13:00:00 +00:00
Nick Craig-Wood
7e065440fb vfs: rename Lookup to Stat to be more in keeping with os 2017-11-03 12:59:59 +00:00
Nick Craig-Wood
e8883e9fdb vfs: factor flags into vfsflags and remove global variables 2017-11-03 12:59:59 +00:00
Nick Craig-Wood
1a8f824bad vfs: use os package errors where possible 2017-11-03 12:59:59 +00:00
Nick Craig-Wood
c1aaff220d Factor new vfs module out of cmd/mountlib
This is an OS style file system abstraction with directory caching
used in mount, cmount, serve webdav and serve http.
2017-11-03 12:59:59 +00:00
Nick Craig-Wood
6da6b2556b mountlib: make directory entries be returned in sorted order 2017-11-03 12:59:59 +00:00
Nick Craig-Wood
ca19fd2d7e mountlib: Make read/write file handles support more standard interfaces
Including Read, ReadAt, Seek, Close for read handles and Write,
WriteAt, Close for write handles.
2017-11-03 12:59:59 +00:00
Nick Craig-Wood
2fac74b517 mountlib: store only Node in *Dir removing DirEntry struct 2017-11-03 12:59:59 +00:00
Nick Craig-Wood
8b6daaa877 mountlib: add DirEntry() to Node interface 2017-11-03 12:59:59 +00:00
Nick Craig-Wood
3af9d63261 mountlib: add Remove and RemoveAll methods to Node 2017-11-03 12:59:59 +00:00
Nick Craig-Wood
c6cd2a5280 mountlib: add parent and entry to Dir 2017-11-03 12:59:59 +00:00
Nick Craig-Wood
0bb84efe75 mountlib: Rename Remove to RemoveName 2017-11-03 12:59:59 +00:00
Nick Craig-Wood
3ec15ac2bd mountlib: make sure Node is always set in DirEntry
This simplifies the code and makes DirEntry.Node usable when using
ReadDir.
2017-11-03 12:59:58 +00:00
Nick Craig-Wood
750690503e mountlib: make Node satisfy os.FileInfo interface 2017-11-03 12:59:58 +00:00
Nick Craig-Wood
54950d3423 mountlib: make more useful as a general purpose file system adaptor 2017-11-03 12:59:58 +00:00
Nick Craig-Wood
014aa3d157 fstest: check no files or directories between runs 2017-11-03 12:59:58 +00:00
Nick Craig-Wood
cc7ed13b9b fs: factor test running code into fstest/run.go 2017-11-03 12:59:58 +00:00
Nick Craig-Wood
6552581a17 b2: correct docs on SHA1s on large files 2017-11-03 12:49:15 +00:00
Nick Craig-Wood
f60e2a7aac swift: add OS_TENANT_ID to config 2017-11-02 14:49:07 +00:00
Nick Craig-Wood
cacae8d12d swift: add OS_USER_ID to config
Also add env names to the config to make them easier to match.
2017-11-01 21:26:04 +00:00
Nick Craig-Wood
4a1013f2de swift: Allow configs with user id instead of user name 2017-10-31 14:23:10 +00:00
Nick Craig-Wood
d0b9baab13 Update travis builds to go 1.9.2 and go 1.8.5 2017-10-26 22:30:53 +01:00
Nick Craig-Wood
96665c16cb serve http: make it compile on go1.6 and go1.7 2017-10-26 21:52:29 +01:00
Nick Craig-Wood
39b9f80302 Add John Leach to contributors 2017-10-26 21:39:22 +01:00
John Leach
1602a3a055 Check if swift segments container exists before create
Avoids blindly trying to create the segments container, which can fail if the
authentication credentials don't allow container creates or updates.

Fixes #1769
2017-10-26 21:39:05 +01:00
Nick Craig-Wood
fafaea7edc Add Andrew Starr-Bochicchio to contributors 2017-10-26 21:35:19 +01:00
Andrew Starr-Bochicchio
e6fb96cfd4 Initial docs for usage with DigitalOcean Spaces. 2017-10-26 21:34:42 +01:00
Nick Craig-Wood
e612673ea0 webdav: fix Copy, Move and DirMove to be more compatible
The fix was to use an absolute URL in the Destination: header, as per RFC2518

This makes it compatible with the golang.org/x/net/webdav server
2017-10-25 22:59:22 +01:00
Nick Craig-Wood
fd2406f94e webdav: fix directory detection when creating a remote
Factor the is a directory check out and use it everywhere.
2017-10-25 12:04:20 +01:00
Nick Craig-Wood
cd146415d1 serve http: error if Range supplied (not supported yet)
Also add Server header
2017-10-24 23:18:36 +01:00
Nick Craig-Wood
2740c965c0 serve http: Fix timeouts 2017-10-24 23:07:46 +01:00
Nick Craig-Wood
6669165b6b serve http command to serve a remote over HTTP
This implements a basic webserver to serve an rclone remote over HTTP.

It also sets up the framework for adding more types of server later.
2017-10-24 13:25:49 +01:00
Nick Craig-Wood
a06bcd4c57 Add paypal.me link to donate page 2017-10-23 12:56:48 +01:00
Nick Craig-Wood
6df1f6fad1 webdav: support put.io #580
* Add docs on how to set up
  * Fix the listing routine
    * Use Depth: 1, otherwise we get a recursive listing
    * Detect collections properly rather than relying on them ending in /
    * Add / to collection URLs which don't have one
2017-10-23 12:37:02 +01:00
Nick Craig-Wood
683befaec1 Add Jason Rose to contributors 2017-10-20 15:46:46 +01:00
ishuah
10f27e2ff2 allow trailing+leading whitespace for passwords - #1717
warn users when they enter passwords with leading/trailing whitespace

Updated config_test.go, removing a deprecated test case and updating TestReveal
2017-10-20 15:46:17 +01:00
Jason Rose
d121a94c20 Corrected default log-level value 2017-10-20 15:43:31 +01:00
Nick Craig-Wood
567071750b vendor: update github.com/ncw/swift to fix memory leak in swift transfers 2017-10-19 14:44:13 +01:00
Nick Craig-Wood
115053930e Make error messages less cryptic when revealing an unobscured password - fixes #1743 2017-10-16 22:03:06 +01:00
Nick Craig-Wood
ef1346602e Add contributors
* thierry
  * Dan Dascalescu
  * Simon Leinen
2017-10-16 21:58:58 +01:00
Dan Dascalescu
9417194751 Fix dedupe description typo 2017-10-16 21:51:31 +01:00
Dan Dascalescu
69ba806528 2017-Oct update to the Drive docs 2017-10-16 21:50:08 +01:00
Dan Dascalescu
ae9d58d625 Copy edit the SFTP guide 2017-10-16 21:49:25 +01:00
Ubuntu
d6bab0169f Per-remote env variables start with RCLONE_CONFIG_ 2017-10-16 21:45:22 +01:00
Ubuntu
d7dd6f3814 Typo fix: resove -> resolve 2017-10-16 21:45:22 +01:00
Nick Craig-Wood
edfab09eb9 config: add sub commands for full config file management
Previously config sub commands were manually parsed rather than using
cobra.

Make config command have the following sub commands:

 * create    Create a new remote with name, type and options.
 * delete    Delete an existing remote <name>.
 * dump      Dump the config file as JSON.
 * edit      Enter an interactive configuration session.
 * file      Show path of configuration file in use.
 * providers List in JSON format all the providers and options.
 * show      Print (decrypted) config file, or the config for a single remote.
 * update    Update options in an existing remote.

The following changes were made to existing commands

 * listproviders was renamed to providers
 * listoptions was removed in favour of providing the output in providers
 * jsonconfig was renamed to create
 * an optional parameter was added to the show command
2017-10-14 11:50:41 +01:00
thierry
0575623dff Add config listproviders, listoptions, jsonconfig for automated config
Addition of a method listing the providers, a method listing the
options of a provider and method of manual configuration.
2017-10-13 17:17:36 +01:00
Nick Craig-Wood
fc8b13c993 moveto/copyto: Fix to allow copying to the same name - fixes #1736 2017-10-12 20:45:36 +01:00
Nick Craig-Wood
b531bf1349 Add android and iOS builds to circleci 2017-10-11 13:40:02 +01:00
Nick Craig-Wood
43ced30f11 fs: Add more errors to retry - fixes #1733 2017-10-10 19:51:02 +01:00
Nick Craig-Wood
106bc1c9fc Add jersou to contributors 2017-10-10 19:44:44 +01:00
jersou
f64ee433b7 docs: missing "sync" command name fix 2017-10-10 19:44:19 +01:00
Nick Craig-Wood
3eb7f52e39 fs: Add "unexpected EOF reading trailer" as a retriable error - fixes #1730 2017-10-09 17:29:16 +01:00
Nick Craig-Wood
7f3dc9b5c4 Implement WebDAV remote #580
This has special knowledge of Owncloud and Nextcloud to enable more
functionality such as mod times.
2017-10-09 16:19:37 +01:00
Nick Craig-Wood
bcdd79320b rest: Add SetUserPass to create Authorization header 2017-10-09 16:19:37 +01:00
Nick Craig-Wood
2453abfbea rest: add a Signer callback 2017-10-09 16:19:37 +01:00
Nick Craig-Wood
efd88c5676 rest: add CallXML and DecodeXML functions 2017-10-09 16:19:37 +01:00
Nick Craig-Wood
4966611866 rest: factor URLJoin and URLEscape from http remote 2017-10-09 16:19:37 +01:00
Nick Craig-Wood
00fe6d95da fs: fix duplicate files causing spurious copies
Before this fix duplicate files (on Google Drive) caused the next file
to be spuriously copied.  `rclone dedupe` worked around the problem.
2017-10-02 16:52:53 +01:00
Nick Craig-Wood
b7521c0fe2 dropbox: fix error when renaming directories - fixes #1708 2017-10-02 11:21:16 +01:00
Nick Craig-Wood
a1d942e5c3 pcloud: make compile with go1.6 2017-10-01 16:41:23 +01:00
Nick Craig-Wood
9e9297838f Implement pcloud remote - #418 2017-10-01 11:37:35 +01:00
Nick Craig-Wood
6403242f48 drive, yandex: add missing CleanUpper interface check 2017-09-30 16:34:46 +01:00
Nick Craig-Wood
737cf3d957 rest: factor multipart upload out into function and generalise 2017-09-30 16:08:38 +01:00
Nick Craig-Wood
8f2f480628 rest: Add TransferEncoding and Close parameters 2017-09-30 16:03:47 +01:00
Nick Craig-Wood
a5e0115b19 Makefile: clean some more files 2017-09-30 16:02:00 +01:00
Nick Craig-Wood
63d0734c71 tree: remove workaround for tree library bug now it is fixed 2017-09-30 15:51:14 +01:00
Nick Craig-Wood
b017fcfe9a vendor: update all dependencies to latest versions 2017-09-30 15:27:27 +01:00
Nick Craig-Wood
911d121bb9 docs: Fix version number 2017-09-30 15:22:00 +01:00
Nick Craig-Wood
1c10497b68 Start v1.38-DEV development 2017-09-30 15:16:09 +01:00
Nick Craig-Wood
d96e45ba5b Version v1.38 2017-09-30 14:20:43 +01:00
Nick Craig-Wood
657b3a674d fs: fix test_all -clean to run just one cleaning thread per remote 2017-09-30 11:07:09 +01:00
Nick Craig-Wood
5177d8c854 docs: update website footer 2017-09-30 09:28:49 +01:00
Nick Craig-Wood
b2b989434d docs: use a shortcode to insert the version string 2017-09-30 09:28:49 +01:00
Nick Craig-Wood
3e9861eecf docs: improve links to cloud providers 2017-09-30 09:28:49 +01:00
Nick Craig-Wood
3fc69f4140 docs: fix daggers 2017-09-30 09:19:53 +01:00
Nick Craig-Wood
b1e85c7ceb website: Adapt to hugo v0.27.1 2017-09-30 09:19:53 +01:00
Nick Craig-Wood
1d994f7330 s3: add Wasabi instructions 2017-09-30 09:00:56 +01:00
Nick Craig-Wood
0e76e35b6f dropbox: Fix deprecation warnings for Move, MoveDir and Copy - fixes #1699 2017-09-30 08:10:51 +01:00
Nick Craig-Wood
29e2744155 vendor: update github.com/dropbox/dropbox-sdk-go-unofficial 2017-09-30 08:10:50 +01:00
Nick Craig-Wood
6390bb2b09 vendor: resync with dep ensure 2017-09-30 08:10:50 +01:00
Stephen Harris
6f2a6dfbc5 For MacOS installation, make sure the /usr/local/bin directory exists 2017-09-28 16:34:01 +01:00
Nick Craig-Wood
b6684ea4f5 crypt: fix PutStream
* Make crypt call the underlying PutStream not Put as it might be different
  * Make wrapped objects with size < 0 carry on having size < 0 after wrapping
2017-09-28 08:56:40 +01:00
Nick Craig-Wood
2857ed5c35 fs: fix --immutable tests on remotes which don't have modtime 2017-09-28 08:56:30 +01:00
Nick Craig-Wood
8771d352d4 Makefile: make test now stores logs and tests everything 2017-09-27 16:13:33 +01:00
Nick Craig-Wood
748c9f5cb7 docs: merge email addresses for @ishuah 2017-09-25 21:02:33 +01:00
Stefan Breunig
646a419453 docs: update overview table to reflect streaming upload ability 2017-09-24 21:59:31 +02:00
Nick Craig-Wood
c98dfa2556 Add ishuah to contributors 2017-09-24 20:03:11 +01:00
ishuah
7195e44dce crypt: added cryptdecode command - #1129 2017-09-24 20:02:59 +01:00
Nick Craig-Wood
c9e2739500 Add Jacob McNamee to contributors 2017-09-24 20:02:40 +01:00
Jacob McNamee
2d8e75cab4 Implement --immutable option 2017-09-24 20:00:00 +01:00
Stefan Breunig
5a3a56abd8 yandex: address errcheck warnings 2017-09-19 23:30:08 +02:00
Stefan Breunig
7b89a5f656 Add LingMan to contributors 2017-09-19 23:13:51 +02:00
LingMan
a4396ebe0f docs: remove duplicated --drive-auth-owner-only documentation (#1688) 2017-09-19 18:00:41 +02:00
Stefan
85877f3adc config: add show/file subcommands which print the config/its path (fixes #1086) 2017-09-19 17:59:19 +02:00
Nick Craig-Wood
87335de8a8 fs: fix filename normalization issues in the tests when running on OS X 2017-09-17 15:31:22 +01:00
Stefan Breunig
12405f9f41 fuse: re-use rcat to support uploads for all remotes (fixes #1672) 2017-09-16 22:49:08 +02:00
Stefan Breunig
168b0a0ecb googlecloudstorage: support streaming uploads (see #1614) (closes #1684) 2017-09-16 22:46:02 +02:00
Stefan
234bfae0d5 b2: implement streaming upload of files with unknown length (see #1614) (closes #1686) 2017-09-16 22:43:48 +02:00
Nick Craig-Wood
4ac9a65049 fs: stop normalizing file names but do a normalized compare in the sync
This works by using a transform function to transform file names when
doing a compare when matching file names in a directory.  rclone now
UTF-8 normalizes the file names and does a case insensitive compare if
the destination remote is case insensitive.

This deprecates the --local-no-unicode-normalization flag.

Fixes #1477
2017-09-16 19:49:31 +01:00
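
A minimal sketch of the comparison key this describes, assuming golang.org/x/text/unicode/norm for the NFC normalization (the function name is illustrative, not rclone's exact code):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/unicode/norm"
)

// matchKey maps a file name to the key used when matching directory
// entries: always UTF-8 (NFC) normalize, and also lower-case when the
// destination remote is case insensitive.
func matchKey(name string, caseInsensitive bool) string {
	key := norm.NFC.String(name)
	if caseInsensitive {
		key = strings.ToLower(key)
	}
	return key
}

func main() {
	// Decomposed "e" + combining acute matches precomposed "é".
	fmt.Println(matchKey("e\u0301", false) == matchKey("\u00e9", false)) // true
}
```
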
Nick Craig-Wood
a8e41f081c fs: re-implement check and cryptcheck using the same traversal as sync
This makes them 100% consistent with sync and also make them use less
memory as they no longer build the whole tree in memory first.

Fixes #1657
2017-09-16 19:49:31 +01:00
Nick Craig-Wood
261c7ad9e4 fs: make syncCopyMove use context for go routine cancellation 2017-09-16 19:49:31 +01:00
Nick Craig-Wood
fe96d5cf0a fs: factor multiple directory traverse out of sync 2017-09-16 19:49:31 +01:00
Nick Craig-Wood
8574129892 swift: fix server side copy to empty container with --fast-list
This was caused by an incorrect error return code from ListR when the
container did not exist.
2017-09-16 19:49:31 +01:00
Nick Craig-Wood
6df12b3f00 fs: improve retriable error detection 2017-09-16 19:48:49 +01:00
Stefan Breunig
7f8d306c9c s3: allow streaming upload of files with unknown file size (see #1614) 2017-09-15 20:20:32 +02:00
Stefan Breunig
9d3f11b493 amazonclouddrive, rcat: ensure rcat integration test passes even with AmazonCloudDrive (fixes: #1680) 2017-09-15 18:09:04 +02:00
Nick Craig-Wood
38cc211762 box: fix Update to send the correct name #97
This caused problems with UTF normalization, with files being
continuously re-uploaded.
2017-09-15 12:03:08 +01:00
Nick Craig-Wood
e0eabc75c0 drive: change the default for --drive-use-trash to true - fixes #1661 2017-09-15 11:58:50 +01:00
Nick Craig-Wood
798502b204 fs: add more errors to be considered temporary errors
This makes a framework for adding temporary errors identified by
syscall number or by error string.

Fixes #1660
2017-09-14 18:01:43 +01:00
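
A sketch of the two lookup tables such a framework might use; the specific entries here are illustrative, not rclone's exact lists:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
	"syscall"
)

// Temporary errors identified by syscall number.
var retriableErrnos = map[syscall.Errno]bool{
	syscall.ECONNRESET: true,
	syscall.EPIPE:      true,
}

// Temporary errors identified by error string.
var retriableStrings = []string{
	"unexpected EOF reading trailer",
}

// isRetriable reports whether err looks like a temporary error,
// either by syscall number or by matching a known error string.
func isRetriable(err error) bool {
	var errno syscall.Errno
	if errors.As(err, &errno) && retriableErrnos[errno] {
		return true
	}
	text := err.Error()
	for _, s := range retriableStrings {
		if strings.Contains(text, s) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isRetriable(syscall.ECONNRESET)) // true
}
```
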
Stefan Breunig
9d22f4208f swift: implement streaming uploads (see #1614) 2017-09-14 07:42:16 +02:00
Stefan Breunig
56dedc49e3 rcat: properly report if the upload fails 2017-09-13 20:21:52 +02:00
Girish Ramakrishnan
2f0551074c s3: set session token when using STS 2017-09-12 22:59:29 +01:00
Nick Craig-Wood
d6eb625815 Add Girish Ramakrishnan to contributors 2017-09-12 09:30:03 +01:00
Girish Ramakrishnan
4c45cbea18 copy: error out if dst could not be listed 2017-09-12 09:29:44 +01:00
Nick Craig-Wood
897690d997 Add Jan Varho to contributors 2017-09-12 09:28:18 +01:00
Jan Varho
5a1351f141 s3: Document glacier transitions and behavior 2017-09-12 09:27:32 +01:00
Jan Varho
c22be38747 s3: Error message for objects in glacier 2017-09-12 09:27:32 +01:00
Oliver Heyme
f91f89d409 onedrive: Removed second browser authentication and enabled headless mode #254 2017-09-12 09:21:19 +01:00
Oliver Heyme
113f43ec42 oauthutil: Made GetToken and PutToken exported (required for OneDrive Business) 2017-09-12 09:21:06 +01:00
Oliver Heyme
7ef18b6b35 onedrive: Support for OneDrive for Business added #254
- 2 tests fail (MimeType and modification date when copying)
- no headless setup
- uses the credentials for the "rclonetest" app I have created
2017-09-12 09:20:36 +01:00
Stefan Breunig
a91448c83a rcat: honor --dry-run even for small files 2017-09-11 22:28:16 +02:00
Stefan Breunig
80b1f2a494 rcat: configurable small files cutoff and implement proper upload verification 2017-09-11 08:26:53 +02:00
Stefan Breunig
57817397a0 rcat: directly upload small files without streaming them 2017-09-11 08:25:34 +02:00
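
A sketch of the cutoff logic these three rcat commits describe: buffer up to the cutoff; if the input ends within the buffer, do a normal known-size upload, otherwise fall back to streaming. Here `put` and `putStream` are hypothetical stand-ins for the real upload paths.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func rcatSketch(in io.Reader, cutoff int64) error {
	buf := make([]byte, cutoff)
	n, err := io.ReadFull(in, buf)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		// Whole stream fitted in the buffer - upload with known size.
		return put(bytes.NewReader(buf[:n]), int64(n))
	}
	if err != nil {
		return err
	}
	// More data follows - stream the buffered part then the rest.
	return putStream(io.MultiReader(bytes.NewReader(buf), in))
}

func put(r io.Reader, size int64) error { fmt.Println("direct upload, size", size); return nil }
func putStream(r io.Reader) error       { fmt.Println("streaming upload"); return nil }

func main() {
	_ = rcatSketch(strings.NewReader("hello"), 1024)
}
```
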
Stefan
10fa2a7806 snapd: remove snapd because the build fails (see #1188, #1595, #1618) 2017-09-10 07:44:13 +02:00
Stefan
9a62d2f8ad Makefile: avoid using deprecated xargs arguments 2017-09-10 07:43:13 +02:00
ishuah91
49816e67bd yandex: implement cleanup (empty trash) - addresses #575 2017-09-08 11:37:39 +01:00
Jon Craton
fe536f3fa8 Typo fix in changelog 2017-09-06 16:13:24 +01:00
Nick Craig-Wood
c54d513bdd Add ishuah91 to contributors 2017-09-06 16:12:29 +01:00
ishuah91
dd975ab00d drive: implement cleanup (empty trash) - addresses #575 2017-09-06 16:12:00 +01:00
Nick Craig-Wood
2944f7603d s3: read 1000 items in listings #1653
This fixes directory listings with wasabi which fail if you supply
more than the allowed 1000 items as a parameter.  rclone used to
supply 1024 items which exceeds the spec - this works fine with
s3/ceph/etc but fails with wasabi.
2017-09-06 11:13:28 +01:00
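
With aws-sdk-go the fix amounts to something like this (the bucket name is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Ask for at most 1000 keys per page - the maximum the S3 spec
	// allows - instead of the previous 1024, which Wasabi rejected.
	req := &s3.ListObjectsInput{
		Bucket:  aws.String("mybucket"), // hypothetical bucket
		MaxKeys: aws.Int64(1000),
	}
	fmt.Println(req)
}
```
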
Nick Craig-Wood
58f7b4ed7c Clarify --filter-from docs 2017-09-01 11:35:26 +01:00
Nick Craig-Wood
cbea06026a Make check obey --ignore-size - fixes #1643 2017-09-01 11:20:41 +01:00
Nick Craig-Wood
8207af9460 b2: Fix SHA1 mismatch when downloading files with no SHA1 #678
Some large files (depending on which version of rclone they were
uploaded with and where they were uploaded from) don't have an SHA1,
so we can't check it in that case.
2017-08-31 21:39:41 +01:00
Nick Craig-Wood
921fcc0723 Add Josiah White to contributors 2017-08-31 21:39:41 +01:00
Josiah White
445fc55772 Ignore return from patch request on failure. 2017-08-31 21:39:00 +01:00
Nick Craig-Wood
09fbbdbb04 Add Daniel Jagszent to contributors 2017-08-31 16:46:44 +01:00
Daniel Jagszent
4b0e983323 Local: Make documentation consistent with code
Change flag `--no-local-unicode-normalization` to `--local-no-unicode-normalization` since that's the way the flag is called in the source code.

Fixes #1633
2017-08-31 16:46:14 +01:00
wuyu
ee9f987234 qingstor: Support MD5 hashes for uploaded objects
* Use a single object upload when files are less than or equal to 67108864 bytes

 * Use a multipart upload when files are larger than 67108864 bytes, and
   calculate MD5SUMs during the upload process

 * For Mkdir and Rmdir, block and wait for the QingStor service to sync its
   status, to handle extreme cases such as creating a just-deleted bucket or
   deleting a just-created bucket
2017-08-31 16:41:08 +01:00
Nick Craig-Wood
f407e3da55 Add bpicode to contributors 2017-08-31 16:35:35 +01:00
bpicode
f1f7e0e6f9 support for zsh auto-completion - #983 2017-08-31 16:21:28 +01:00
bpicode
7e93567b18 vendor: update version of github.com/spf13/cobra for zsh support 2017-08-31 16:21:28 +01:00
Nick Craig-Wood
2c8d6e86cc fs: fix gofmt 2017-08-31 16:01:19 +01:00
cbruegg
bb6300b032 Fix bwlimit toggle in conjunction with schedules (Fixes #1607) 2017-08-31 15:33:29 +01:00
Nick Craig-Wood
e96c5b5f39 hubic: don't check the container exists before creating it
This fixes being able to create containers for Hubic.
2017-08-30 15:54:49 +01:00
Nick Craig-Wood
672c410235 Update to using go1.9 as the default go version
Get rid of Makefile spaghetti for avoiding vendor directory where
possible in make check.
2017-08-29 16:39:56 +01:00
Nick Craig-Wood
459cf64403 qingstor: fix errors in debug parameters noticed by go1.9 go vet 2017-08-29 14:19:14 +01:00
Stefan Breunig
0158ab6926 info: add check to stream files with unknown size 2017-08-22 08:00:10 +02:00
Stefan Breunig
4e189fe6e7 fstests: only test uploads with indeterminate size on remotes that support it 2017-08-22 07:19:43 +02:00
Stefan Breunig
b78ecb1568 docs: add optional feature "streaming uploads" to overview table 2017-08-19 14:35:17 +02:00
Stefan Breunig
a122b9fa7a yandex: implement streaming uploads (see #1614) 2017-08-19 14:07:23 +02:00
Stefan Breunig
323daae63e http: immediately fail streaming uploads instead of spooling them first (see #1614) 2017-08-19 12:42:31 +02:00
Stefan Breunig
e754f50778 box: implement streaming uploads (see #1614) 2017-08-19 12:32:56 +02:00
Stefan Breunig
034cf22d4d Add Alex McGrath Kraak to contributors 2017-08-17 06:49:38 +02:00
Alex McGrath Kraak
2cc9071791 http: add --user-agent option. close #1557 2017-08-17 06:49:27 +02:00
Stefan Breunig
b510c70c1e b2: calculate missing hashes on the fly instead of spooling – fixes #1288 2017-08-12 12:57:34 +02:00
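
The general trick, sketched below: tee the upload stream through a SHA-1 hasher so the digest is ready as soon as the body has been sent, with no temporary file (B2's API allows the SHA-1 to be supplied at the end of the upload, which is what makes this possible).

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("file contents")
	h := sha1.New()
	body := io.TeeReader(src, h) // hash everything the upload reads

	// Pretend this is the HTTP upload consuming the body.
	n, _ := io.Copy(io.Discard, body)

	fmt.Printf("uploaded %d bytes, sha1 %s\n", n, hex.EncodeToString(h.Sum(nil)))
}
```
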
Stefan Breunig
001431d326 snapcraft: switch back to go build plugin and only build rclone – see #1188 2017-08-12 09:20:37 +02:00
Stefan Breunig
e64435a5c1 snapcraft: adjust snapcraft-dev build to allow fuse mounting – see #1188 2017-08-11 20:57:13 +02:00
Nick Craig-Wood
9c47b767b4 swift: Configure from environment vars and add endpoint_type - fixes #1542 2017-08-10 21:38:45 +01:00
Nick Craig-Wood
2870874329 azureblob: Read LastModified time of containers in root listing 2017-08-10 20:20:14 +01:00
Nick Craig-Wood
d54fca4e58 dropbox: fix entry doesn't belong in directory error - fixes #1558
This was caused by the unreliable casing in `path_lower` as returned
in the directory listings.  We now ignore everything except the last
element in `path_lower` which is guaranteed to have the correct case.
2017-08-10 13:57:06 +01:00
Nick Craig-Wood
dcbf538416 dropbox: stop using deprecated API methods 2017-08-10 13:57:06 +01:00
Nick Craig-Wood
5b79922b5e vendor: add dropbox/dropbox-sdk-go-unofficial 2017-08-10 13:57:06 +01:00
Nick Craig-Wood
41b2645dec vendor: remove ncw/dropbox-sdk-go-unofficial dependency 2017-08-10 13:57:05 +01:00
Nick Craig-Wood
76226e0147 dropbox: swap back to upstream dropbox/dropbox-sdk-go-unofficial
Now that dropbox/dropbox-sdk-go-unofficial#13 is fixed.
2017-08-10 13:57:05 +01:00
Nick Craig-Wood
76c5aa8533 gcs: Check for errors when testing bucket is OK in mkdir #1590
Previously we would check the bucket's status and on error we would
try to create it.  Now we only try to create it if we got a not found
error, otherwise we report the error to the user.
2017-08-10 10:29:21 +01:00
Nick Craig-Wood
265fb8a5e2 fs: Manage empty directories - fixes #100
During the sync we collect a list of directories which should be empty
and attempt to rmdir them at the end of the sync.  If the directories
are not empty then the rmdir will fail, logging a message but not
erroring the sync.
2017-08-09 21:07:00 +01:00
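
A sketch of that bookkeeping, with `rmdir` as a stand-in for the remote's Rmdir and illustrative directory names:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// During the sync: record directories which should end up empty.
	candidates := map[string]bool{"a/b": true, "a": true}

	// At the end: sort deepest-first so children go before parents.
	dirs := make([]string, 0, len(candidates))
	for d := range candidates {
		dirs = append(dirs, d)
	}
	sort.Sort(sort.Reverse(sort.StringSlice(dirs)))

	for _, d := range dirs {
		if err := rmdir(d); err != nil {
			// Not empty (or other error): log and carry on,
			// don't fail the sync.
			fmt.Println("not removing", d, ":", err)
		}
	}
}

func rmdir(dir string) error { fmt.Println("rmdir", dir); return nil }
```
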
Nick Craig-Wood
8a1a900733 fstest: use Feature.CanHaveEmptyDirectories to sharpen tests
Now we actually test whether the directories are present or not,
filtering out empty directories in the test using the
CanHaveEmptyDirectories flag.
2017-08-09 20:55:08 +01:00
Nick Craig-Wood
20ae7d562b fs: Add CanHaveEmptyDirectories and BucketBased feature flags to all remotes 2017-08-09 20:55:08 +01:00
Nick Craig-Wood
c1bfdd893f vendor: update qingstor
dep ensure needed to do this, probably after various vendor merges
2017-08-09 13:03:07 +01:00
Nick Craig-Wood
ec2ea37ad2 fs: Add --disable flag to disable optional features - fixes #1551
Eg to disable server side copy use `--disable copy`, to see a list of
what you can disable, `--disable help`.
2017-08-07 21:34:45 +01:00
Nick Craig-Wood
bced73c947 sftp: fix compile for go1.6 2017-08-07 21:34:05 +01:00
Nick Craig-Wood
5b6585f57d sftp: limit new connections per second 2017-08-07 19:47:49 +01:00
Nick Craig-Wood
c6b844977a sftp: clear the cached hashes on object update 2017-08-07 17:36:59 +01:00
Nick Craig-Wood
47eab397ba sftp: implement connection pooling for multiple ssh connections
A connection may be opened for each `--transfers` and `--checkers`
now.  Connections are checked when putting them in the pool and
getting them out of the pool, so it should recover from network errors
much better.

This fixes #1561, fixes #1541, fixes #1381, fixes #1158, fixes #1538
2017-08-07 17:19:37 +01:00
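
A sketch of the pooling pattern described here, with connections health-checked on the way in and on the way out; `conn`, `healthy` and `dial` are illustrative stand-ins, not rclone's code.

```go
package main

import "fmt"

type conn struct{}

func (c *conn) healthy() bool { return true }

func dial() (*conn, error) { return &conn{}, nil }

// pool holds idle connections in a buffered channel.
type pool struct{ free chan *conn }

func newPool(size int) *pool { return &pool{free: make(chan *conn, size)} }

// get returns a checked idle connection, or dials a new one.
func (p *pool) get() (*conn, error) {
	for {
		select {
		case c := <-p.free:
			if c.healthy() {
				return c, nil
			}
			// Broken connection: drop it and try the next one.
		default:
			return dial()
		}
	}
}

// put returns a connection to the pool if it is still usable.
func (p *pool) put(c *conn) {
	if !c.healthy() {
		return
	}
	select {
	case p.free <- c:
	default: // pool full - discard the connection
	}
}

func main() {
	p := newPool(8) // roughly one per --transfers and --checkers
	c, _ := p.get()
	p.put(c)
	fmt.Println("connection reused")
}
```
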
Nick Craig-Wood
bfe812ea6b dedupe: implement merging of duplicate directories - fixes #1243 2017-08-07 15:36:41 +01:00
Nick Craig-Wood
db1995e63a Add MergeDirs optional interface and implement it for drive 2017-08-07 15:32:47 +01:00
Nick Craig-Wood
81a2ab599f fs: add optional ID to fs.Directory and set it in the remotes which care 2017-08-07 15:31:22 +01:00
Nick Craig-Wood
74687c25f5 sftp: fixup formatting and golint warnings 2017-08-07 14:50:31 +01:00
Nick Craig-Wood
d025066fae Add Christian Brüggemann to contributors 2017-08-06 11:50:20 +01:00
Christian Brüggemann
80ce569874 sftp: Add support for md5 and sha1 hashes where available 2017-08-06 11:49:52 +01:00
Nick Craig-Wood
ee13ea74f1 box: fix multipart upload giving "parts_mismatch" error #97 2017-08-05 21:01:32 +01:00
Stefan Breunig
40f24e0ea3 config: use absolute ConfigPath to ensure newly written config is on the same mount - fixes #1569 2017-08-05 12:13:25 +02:00
Stefan Breunig
b523cfc01d oauthutil: don't show "save failed" error when setting up new remote – fixes #1466 2017-08-05 12:04:42 +02:00
Nick Craig-Wood
38dabcf6b2 azure: correct docs on MD5 and chunked files 2017-08-04 23:54:57 +01:00
Nick Craig-Wood
ee6a35d750 Test compilation of all arches
* Add compile_all step to Makefile
  * Add this to travis
  * Add -compile-only flag to cross-compile.go to save time making the zips
2017-08-04 23:20:26 +01:00
Nick Craig-Wood
92d2e1f8d7 azureblob: rework and complete #801
* Fixup bitrot (rclone and Azure library)
  * Implement Copy
  * Add modtime to metadata under mtime key as RFC3339Nano
  * Make multipart upload work
  * Make it pass the integration tests
  * Fix uploading of zero length blobs
  * Rename to azureblob as it seems likely we will do azurefile
  * Add docs
2017-08-04 22:56:16 +01:00
Nick Craig-Wood
98d238daa4 Add Andrei Dragomir to contributors 2017-08-04 22:56:16 +01:00
Andrei Dragomir
036fd61a50 Added Azure Blob storage support #801 2017-08-04 22:54:27 +01:00
Nick Craig-Wood
91cfcc21ff vendor: add github.com/Azure/azure-sdk-for-go and dependencies 2017-08-04 22:54:27 +01:00
Nick Craig-Wood
132f71d504 qingstor: add missing file to fix plan9 build 2017-08-04 22:54:27 +01:00
Stefan Breunig
861e125a4f local: revert to copy when moving file across file system boundaries – fixes #1176 2017-08-04 23:27:32 +02:00
Stefan Breunig
230e65313a snapcraft: slightly improve buildfile (see #1188) 2017-08-04 21:37:25 +02:00
Nick Craig-Wood
8a185deefa qingstor: Fixes before merge
* use rclone's http.Client for bwlimit, logging, etc
  * remove extraneous fmt.Sprintf from logging
  * fix icon in docs
  * add docs about --fast-list
  * hoist md5 regexp compilation out of function
  * create container if necessary on server side copy
  * keep note of whether the container has been deleted
  * build constraint not to compile for plan9
2017-08-04 19:37:53 +01:00
Nick Craig-Wood
7b9557df90 Add wuyu to contributors 2017-08-04 19:37:53 +01:00
wuyu
ec5b72f8d5 Add new QingStor remote
Add new package qingstor to support QingStor API.

Add new unit tests for it and ran them through; some test cases are
commented out because of some of the features of QingStor.

Add new docs for it.
2017-08-04 17:25:47 +01:00
wuyu
466dd22b44 vendor: add qingstor-sdk-go for QingStor 2017-08-04 17:09:28 +01:00
Nick Craig-Wood
f682002b84 fs: Make tests create a new bucket rather than purging the old one
This enables QingStor to pass the tests as it has a 2 minute lockout
on deleting the old bucket then creating it again.
2017-08-04 17:09:28 +01:00
Nick Craig-Wood
7d34caac83 cmd: add os and go version to rclone version output 2017-08-04 14:25:55 +01:00
Stefan Breunig
28a18303f3 implement rcat – fixes #230, fixes #1001 2017-08-03 21:42:35 +02:00
Nick Craig-Wood
3e3a59768e fs/test_all: fix after fstest factorisation 2017-08-03 20:01:05 +01:00
Nick Craig-Wood
d4b9bb9894 gen_tests: allow specification of a build tag 2017-08-03 20:01:05 +01:00
Nick Craig-Wood
e01741b557 fs: Cleaning up directories in test is no longer needed
...as it is done in the finalise method.
2017-08-03 20:01:05 +01:00
Nick Craig-Wood
7ec24ad67a fstests: Use a different container after the Rmdir
Use a new directory here.  This is for the container based remotes
which take time to create and destroy a container (eg azure blob)
2017-08-03 20:01:05 +01:00
Nick Craig-Wood
eff10bbc1d Add Oliver Heyme to contributors 2017-08-03 20:01:05 +01:00
Oliver Heyme
73f7278497 oauthutil: Added AuthOptions and shut down the web server properly
1. This makes AuthOptions a parameter for doConfig, Config and ConfigOffline to enable a Fs to add additional options (required for OneDrive for Business)
2. Fix to properly shut down the webserver receiving the auth information (go1.8)
2017-08-03 19:59:42 +01:00
Nick Craig-Wood
6d59887487 Fix URL encoding issues - fixes #1573
This fixes the confusion between paths which were URL encoded and
paths which weren't.  In particular it allows files to have % in the
name.
2017-08-02 13:19:36 +01:00
Nick Craig-Wood
21aca68680 tree: fix when running under Windows 2017-08-01 14:46:21 +01:00
Nick Craig-Wood
214f5e6411 http: only run the tests on go1.8+ 2017-08-01 12:38:29 +01:00
Nick Craig-Wood
2b5ce6ef51 http: Fix directories with : in #1555 2017-07-31 23:15:31 +01:00
Nick Craig-Wood
b0fd187cba http: fix panic with url encoded content - fixes #1565
This fixes the issue which caused the panic (carrying on after an
error) and the issue which caused the error (double unescaping the
URL).
2017-07-30 23:16:32 +01:00
Nick Craig-Wood
c3cd247d4b Document --dump-bodies using lots of memory - fixes #1516 2017-07-30 10:02:14 +01:00
Nick Craig-Wood
5d911e9450 pacer: Factor TokenDispenser into pacer from box remote 2017-07-29 23:14:47 +01:00
Nick Craig-Wood
a56d51c594 Add Andy Pilate to contributors 2017-07-27 21:18:37 +01:00
Andy Pilate
ef328c5497 Fixes typo in command dedupe definition 2017-07-27 21:17:57 +01:00
Andy Pilate
49e4cdb8b9 Added information about Drive server copies limits 2017-07-27 21:17:24 +01:00
Stefan Breunig
ee52365e88 doc: add FAQ entry for "tcp lookup no such host" - fixes #683 2017-07-27 18:20:25 +02:00
Nick Craig-Wood
f3060caf04 Implement tree command - fixes #1528 2017-07-26 23:06:48 +01:00
Nick Craig-Wood
bfef0bc2e9 vendor: add github.com/a8m/tree 2017-07-26 23:06:48 +01:00
Nick Craig-Wood
da9926d574 vendor: update golang.org/x/sys
Now that https://github.com/golang/go/issues/21136 is fixed
2017-07-26 22:56:17 +01:00
Nick Craig-Wood
ebc8361933 mount: Add notes on Windows limitations from Bill Zissimopoulos 2017-07-26 21:08:24 +01:00
Nick Craig-Wood
71fe046937 fs: Add Find method to DirTree 2017-07-26 16:38:53 +01:00
Nick Craig-Wood
d5ff7104e5 fs: Implement NewDirTree for non --fast-list 2017-07-26 16:38:44 +01:00
Nick Craig-Wood
cd4895690a fstest: Factor test initialisation into Initialise() 2017-07-26 16:38:33 +01:00
Nick Craig-Wood
1ecf2bcbd5 fs: fix typo in --bind description 2017-07-23 23:08:33 +01:00
Nick Craig-Wood
c3d6cc91ec Fix --bind flag changes under go1.6
Correcting 9f24639568
2017-07-23 22:36:32 +01:00
Nick Craig-Wood
6fce1ac267 vendor: roll back golang.org/x/sys to fix compile
Until https://github.com/golang/go/issues/21136 is fixed
2017-07-23 22:24:24 +01:00
Nick Craig-Wood
9f24639568 Add --bind flag for choosing the local addr on outgoing connections - fixes #1087
Supported by all remotes except FTP.
2017-07-23 16:27:39 +01:00
Nick Craig-Wood
8b30023f0d Update MAINTAINERS with how to update the authors file. 2017-07-23 15:06:11 +01:00
Nick Craig-Wood
c507836617 Add Zhiming Wang to contributors 2017-07-23 15:02:19 +01:00
Zhiming Wang
6152bab28d local: add --skip-links to suppress symlink warnings
Give users a way to explicitly acknowledge that symlinks are to be skipped
without warnings.

Fixes #1480.
2017-07-23 15:02:02 +01:00
Nick Craig-Wood
6ae29df4d7 Add commit message and updating a backend sections to CONTRIBUTING 2017-07-23 13:23:42 +01:00
Nick Craig-Wood
de54fd4c64 mount: add docs for windows install 2017-07-23 13:05:02 +01:00
Nick Craig-Wood
859721f3cf Add John Papandriopoulos to contributors 2017-07-23 13:05:02 +01:00
John Papandriopoulos
d134d78979 b2: add --b2-hard-delete to permanently delete instead of hide files - Fixes #1547 2017-07-23 13:02:42 +01:00
Nick Craig-Wood
7b81f12dad box: add docs
* reorder remotes so they are in alphabetical order by full name everywhere
  * update CONTRIBUTING doc
2017-07-23 11:32:34 +01:00
Nick Craig-Wood
d279161cee Implement box storage remote - #97 2017-07-23 11:32:34 +01:00
Nick Craig-Wood
b5bf819256 acd,b2,crypt,drive: add missing upload options 2017-07-23 11:32:34 +01:00
Nick Craig-Wood
384724fd11 rest, b2, onedrive: remove Absolute parameter from rest.Opts and replace with RootURL 2017-07-23 11:32:34 +01:00
Nick Craig-Wood
5f70746d39 rest: Allow RootURL to be overridden 2017-07-23 11:32:34 +01:00
Nick Craig-Wood
088806ba4c rest: add Parameters field to opts for adding URL parameters 2017-07-23 11:32:34 +01:00
Nick Craig-Wood
45ba4ed594 rest: implement multipart uploads 2017-07-23 11:32:34 +01:00
Nick Craig-Wood
edfa1b3a69 oauthutil: fix panic from use of nil context 2017-07-23 11:32:34 +01:00
Nick Craig-Wood
db6009126d Fix test failure with new stretchr/testify - fixes #1550 2017-07-23 08:59:07 +01:00
Nick Craig-Wood
5255cbf5e3 Update godep as part of vendor update 2017-07-23 08:51:57 +01:00
Nick Craig-Wood
eb87cf6f12 vendor: update all dependencies 2017-07-23 08:51:42 +01:00
Nick Craig-Wood
0b6fba34a3 Fix fetch_windows target in Makefile 2017-07-22 20:44:09 +01:00
Nick Craig-Wood
c8b5ee1e54 Start v1.37-DEV development 2017-07-22 20:43:06 +01:00
Nick Craig-Wood
a73ecec11f Version v1.37 2017-07-22 20:04:29 +01:00
Nick Craig-Wood
c223464cd0 mount: fix panic on renames - fixes #1533
Make sure d.items is not nil and improve locking
2017-07-22 11:00:51 +01:00
Nick Craig-Wood
39d09c04a2 drive: Make --drive-trashed-only show all directories - fixes #1524
Without showing all directories it doesn't show trashed files which
are in an untrashed directory.

This isn't an ideal fix, but it makes the feature useable.
2017-07-22 10:03:27 +01:00
Stefan Breunig
db5494b316 document SIGUSR2 to toggle bandwidth limiter (fixes #1424) 2017-07-22 10:49:45 +02:00
Stefan Breunig
c3dab09a94 add Yaroslav Halchenko to contributors 2017-07-22 10:28:12 +02:00
Yaroslav Halchenko
3ddcbce989 DOC: any empty directoryies -> empty directories (fixes #1546) 2017-07-22 10:24:41 +02:00
Nick Craig-Wood
0cf19ef66a Make ListDirSorted check for subdirectories and write test 2017-07-19 09:36:27 +01:00
Nick Craig-Wood
655891170f Check in ListDirSorted that the directory entries all belong 2017-07-18 23:39:42 +01:00
Nick Craig-Wood
93423a0812 swift: fix zero length directory markers showing in the subdirectory listing
This was causing lots of duplicated files to be copied.
2017-07-18 23:38:48 +01:00
Nick Craig-Wood
78f33f5d6e Add gdm85 to contributors 2017-07-18 15:16:17 +01:00
gdm85
209b7da3b2 gcs: Add ability to specify location and storage class via config and command line
* Add gcs-location and gcs-storage-class options for Google Cloud Storage
* Added config options (same as S3)
* Updated configuration example in documentation for Google Cloud Storage
2017-07-18 15:15:29 +01:00
Nick Craig-Wood
6f71260acf Add --tpslimit and --tpslimit-burst to limit transactions per second for HTTP
This is useful if you are being rate limited or banned by your cloud
storage provider.
2017-07-16 17:25:39 +01:00
Nick Craig-Wood
ec6c3f2686 vendor: remove github.com/tsenart/tb 2017-07-16 16:14:44 +01:00
Nick Craig-Wood
62e28d0a72 Replace token bucket limiter github.com/tsenart/tb with golang.org/x/time/rate
In tests tsenart/tb has proved inaccurate at low rates.
2017-07-16 16:14:44 +01:00
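
A minimal sketch of the new limiter in use, assuming the flag values map straight onto golang.org/x/time/rate:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Roughly what --tpslimit 10 --tpslimit-burst 1 asks for:
	// 10 transactions per second with a burst of 1.
	limiter := rate.NewLimiter(rate.Limit(10), 1)
	for i := 0; i < 3; i++ {
		if err := limiter.Wait(context.Background()); err != nil {
			return
		}
		fmt.Println("transaction", i)
	}
}
```
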
Nick Craig-Wood
470642f2b7 vendor: add vendor/golang.org/x/time/rate 2017-07-14 05:35:00 +01:00
Nick Craig-Wood
b5002eb6a4 drive: document google docs sometimes fail to download 2017-07-10 23:15:30 +01:00
Nick Craig-Wood
ee5698b3a9 drive: Add docs on duplicated files, and re-copying 2017-07-09 23:32:34 +01:00
Nick Craig-Wood
728ff231ab Link wiki from main website - fixes #1156 2017-07-09 22:48:52 +01:00
Nick Craig-Wood
542f938ce2 website: Decrease spacing between menu items
...as they were overflowing the page before.  Thanks to Amy Craig-Wood
for CSS wrangling!
2017-07-09 22:48:26 +01:00
Nick Craig-Wood
e24d0ac94d Add slack invite to website menu - fixes #1145 2017-07-08 22:30:35 +01:00
Nick Craig-Wood
da2e2544ee Fix tests on Windows 2017-07-08 16:26:41 +01:00
Nick Craig-Wood
72add5ab27 sync: state whether duplicates are objects or directories 2017-07-08 15:42:18 +01:00
Nick Craig-Wood
9ac72ee53f Make commit number in beta version tag be 3 digits always 2017-07-07 21:31:52 +01:00
Nick Craig-Wood
c3dac2e385 dropbox: fix large directory listings 2017-07-07 21:20:07 +01:00
Nick Craig-Wood
92294a4a92 drive: Add --drive-trashed-only and remove obsolete --drive-full-list
* Add --drive-trashed-only to show only the contents of the trash
  * Remove --drive-full-list as it is obsolete
  * Tidy the docs for the drive options
2017-07-06 15:32:57 +01:00
Nick Craig-Wood
69ff009264 Use a stable sort for sorting directory entries
This is useful if there are duplicates. Assuming the remote delivers
the entries in a consistent order, this will give the best user
experience in syncing as it will consistently use the first entry for
the sync comparison.
2017-07-06 14:07:26 +01:00
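
A sketch of the idea with Go's standard library: `entry` stands in for a directory entry, where duplicates share a name but differ elsewhere (e.g. by ID on Google Drive).

```go
package main

import (
	"fmt"
	"sort"
)

type entry struct {
	remote, id string
}

func main() {
	entries := []entry{{"a.txt", "id2"}, {"b.txt", "id3"}, {"a.txt", "id1"}}
	// A stable sort keeps the remote's delivered order for equal names,
	// so the same duplicate is consistently used for the sync comparison.
	sort.SliceStable(entries, func(i, j int) bool {
		return entries[i].remote < entries[j].remote
	})
	fmt.Println(entries) // [{a.txt id2} {a.txt id1} {b.txt id3}]
}
```
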
Nick Craig-Wood
27b157580e Move make_test_files.go into bin 2017-07-06 11:54:57 +01:00
Nick Craig-Wood
3f288bc9ea Added decrypt_names.py to help decoding encrypted logs 2017-07-06 11:53:39 +01:00
Nick Craig-Wood
ce1b9a7daf swift,hubic: fix paged directory listings
This was caused by rclone adjusting the object names.  If the last
object in the listing page happened to be a directory, rclone would
remove the / which caused the next page to start in the wrong place.
2017-07-06 11:31:37 +01:00
Nick Craig-Wood
f0512d1a52 Fix missing fs.Dir -> fs.Directory 2017-07-06 11:31:36 +01:00
Stefan Breunig
51866fbd34 drive: add missing seek to start on retries of chunked uploads
follow up to ee13bc6775
2017-07-05 18:52:04 +02:00
Stefan Breunig
ee13bc6775 drive: fix stats accounting for upload - fixes #970, #968 2017-07-04 19:56:46 +02:00
Nick Craig-Wood
e86f62c3e8 Add rclone info internal command for testing out limits of remotes 2017-07-03 15:05:27 +01:00
Nick Craig-Wood
6c3bf629a1 yandex: fix fs.Name()
Put in tests for fs.Root() and fs.Name() for all remotes
2017-07-03 13:39:31 +01:00
Nick Craig-Wood
575e779b55 Warn about duplicate files when syncing - fixes #1506
Error about unsorted directories and test thoroughly
2017-06-30 21:24:13 +01:00
Nick Craig-Wood
dc56ad9816 sftp, local: refactor to stop storing os.FileInfo in preparation for serialization 2017-06-30 14:27:27 +01:00
Nick Craig-Wood
e7d04fc103 Create fs.Directory interface and use it everywhere 2017-06-30 14:26:59 +01:00
Nick Craig-Wood
e2d7d413ef fs: rename BasicInfo to DirEntry 2017-06-30 14:26:58 +01:00
Nick Craig-Wood
e7e9aa0dfa fs: Remove unused ListFser interface 2017-06-30 14:26:58 +01:00
Nick Craig-Wood
f88300a153 Don't Mkdir at the start of sync - fixes #1131
This is possible now that the bucket based remotes will create the
buckets on demand (9c1e703777).
2017-06-29 12:31:53 +01:00
Nick Craig-Wood
e54087ece1 Fix config tests to save configData which fixes subsequent tests 2017-06-29 12:31:53 +01:00
Nick Craig-Wood
54561fd2bc s3: work around eventual consistency in bucket creation
Deleting a bucket then testing its existence can give the wrong
result.  Work around by keeping a flag as to whether we have deleted
the bucket.
2017-06-29 12:31:52 +01:00
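
A sketch of the flag this describes (the type and method names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

// bucketState remembers locally that we just deleted the bucket,
// because asking S3 again too soon may still report it as existing
// (eventual consistency).
type bucketState struct {
	mu      sync.Mutex
	deleted bool
}

func (b *bucketState) markDeleted() {
	b.mu.Lock()
	b.deleted = true
	b.mu.Unlock()
}

// exists consults the flag before trusting the remote check.
func (b *bucketState) exists(remoteCheck func() bool) bool {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.deleted {
		return false
	}
	return remoteCheck()
}

func main() {
	var b bucketState
	b.markDeleted()
	fmt.Println(b.exists(func() bool { return true })) // false
}
```
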
Nick Craig-Wood
479c5a514a swift, s3, gcs: create container if necessary on server side copy 2017-06-28 21:16:07 +01:00
Nick Craig-Wood
f3c7e1a9dd Debug directory creation and removal - fixes #1192 2017-06-27 22:19:35 +01:00
Nick Craig-Wood
70b5b2f5c6 acd, onedrive: fix initialization order for token renewer - fixes #1442 2017-06-27 22:19:35 +01:00
sainaen
d7811f72ad Clarify how 'move' may use server side copying 2017-06-26 22:54:14 +01:00
Nick Craig-Wood
aa20486485 Add --stats-log-level so can see --stats without -v - fixes #1180
The most common use for this flag is likely to be showing the stats
without using -v by using `--stats-log-level NOTICE`.
2017-06-26 22:50:37 +01:00
Nick Craig-Wood
33f302a06b Document workaround for files/dirs with : in - fixes #1331 2017-06-26 16:13:12 +01:00
Nick Craig-Wood
24cb739d1f b2: reduce minimum chunk size to 5MB - fixes #1289 2017-06-26 16:02:46 +01:00
Nick Craig-Wood
f0abd6173d Add Harshavardhana and sainaen to contributors 2017-06-26 12:37:00 +01:00
sainaen
1817d8f631 crypt: Fix typo in cryptcheck's short description 2017-06-26 12:35:20 +01:00
sainaen
a308ad5bd7 Fix typos and punctuation in the 'docs.md'
* Add commas to introductory phrases ('However', 'First', 'For example')
* Consistently capitalize provider names
* Fix some typos ('bandwith', 'integriTIty', etc.)
2017-06-26 12:35:20 +01:00
Nick Craig-Wood
b360527931 mount: fix hang on errored upload
In certain circumstances if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed.  This will cause the write side to return a pipe closed
error fixing the hang.

Fixes #1498
2017-06-26 12:08:51 +01:00
Stefan Breunig
52b042971a keep file permissions and try to keep user/group on supported systems (fixes #1467) 2017-06-25 09:05:24 +02:00
Stefan Breunig
2d2778eabf don't delete remote if name does not change while renaming (fixes #1495) 2017-06-25 08:55:54 +02:00
Nick Craig-Wood
d55f8f0492 sftp: add support for using ssh key files #1494
Update docs about macOS and ssh-agent #1218
2017-06-23 16:25:35 +01:00
Nick Craig-Wood
b44d0ea088 drive: convert / in names to a unicode equivalent (／) - fixes #62 2017-06-20 21:27:14 +01:00
Nick Craig-Wood
d981456ddc Add Vasiliy Tolstov to contributors 2017-06-20 21:27:14 +01:00
Nick Craig-Wood
b22c4c4307 http: fix, tidy and rework ready for release
* Fix remaining problems
  * Refactor to make testing easier and add a test suite
  * Make path parsing more robust.
  * Add single file operations
  * Add MimeType reading for objects
  * Add documentation
  * Note go1.7+ is required to build
2017-06-20 21:27:14 +01:00
Nick Craig-Wood
afc8cc550a http: Update interfaces for List/ListR/Put/Update 2017-06-20 21:27:14 +01:00
Vasiliy Tolstov
83b642e98f fix for caddy web server
Signed-off-by: Vasiliy Tolstov <v.tolstov@selfip.ru>
2017-06-20 21:27:14 +01:00
Nick Craig-Wood
d5d635b7f3 http: Fix comments, remove optional methods which don't work 2017-06-20 21:27:14 +01:00
Vasiliy Tolstov
6b89e6c381 add new http remote filesystem
Signed-off-by: Vasiliy Tolstov <v.tolstov@selfip.ru>
2017-06-20 21:27:14 +01:00
Nick Craig-Wood
be0dd09801 vendor: golang.org/x/net/html for http 2017-06-20 21:27:14 +01:00
Nick Craig-Wood
b76cd4abd2 Fix Range header option 2017-06-20 21:27:14 +01:00
Nick Craig-Wood
0dbf1230bc Update CONTRIBUTING with --fast-list 2017-06-20 21:27:14 +01:00
Nick Craig-Wood
4fd9570332 fs: Use an in place filter in ListDirSorted 2017-06-20 21:27:14 +01:00
Harshavardhana
8d77e48190 Minio supports ETags and metadata.
The current doc mentioned lack of ETag and metadata
support, which has long since been fixed in many
upstream Minio releases.

Also clean up the doc to show the new startup banner etc.
2017-06-20 08:21:02 +01:00
Nick Craig-Wood
dcce65b2b3 mount/cmount: factor duplicated code into mountlib 2017-06-19 14:36:51 +01:00
Nick Craig-Wood
4ce31555b2 vendor: update github.com/billziss-gh/cgofuse - fixes #1481 2017-06-19 09:53:34 +01:00
Nick Craig-Wood
5ed4bc97f3 travis: reduce number of parallel builds to avoid "Killed" error 2017-06-19 08:16:35 +01:00
Nick Craig-Wood
54e37be591 Only test with -race using go latest 2017-06-19 08:07:50 +01:00
Nick Craig-Wood
eaa717b88a Fix crypt obfuscate tests with Windows 2017-06-18 22:53:19 +01:00
Nick Craig-Wood
bbbc202ee6 Add ftp.md to docs builder and update docs 2017-06-15 20:12:26 +01:00
Nick Craig-Wood
97364fd0b6 ncdu: disable on plan9 and solaris as termbox isn't supported there 2017-06-15 20:10:54 +01:00
Nick Craig-Wood
c34f11a92f rclone ncdu for exploring a remote with a text based user interface. 2017-06-15 17:44:17 +01:00
Nick Craig-Wood
e31fc877e2 vendor: github.com/nsf/termbox-go and dependencies for rclone ncdu 2017-06-15 16:46:32 +01:00
Nick Craig-Wood
e069fc439e crypt: use an in place filter for encrypting directory entries 2017-06-15 16:46:32 +01:00
Nick Craig-Wood
5250fcdf08 core: fix data race in walk
This was detected by the race detector when the client of Walk() sorted entries.
2017-06-15 16:46:32 +01:00
Edward Q. Bridges
9876ba53f8 Updated permissions
As it happens, after testing, the `GetObject` permission is also required to do `HEAD` requests on a given object.
2017-06-14 17:29:21 +01:00
Nick Craig-Wood
64662bef8d Deprecate --old-sync-method it is replaced with --fast-list
Remove old sync method code.
2017-06-14 16:49:40 +01:00
Nick Craig-Wood
0b8d9084fc test_all: print command line so it can be cut and pasted into bash 2017-06-14 16:49:40 +01:00
Nick Craig-Wood
7be49249d3 Add lsjson command - fixes #1063 2017-06-14 16:49:40 +01:00
Nick Craig-Wood
8a6a8b9623 Change List interface and add ListR optional interface
This simplifies the implementation of remotes.  The only required
interface is now `List` which is a simple one level directory list.

Optionally remotes may implement `ListR` if they have an efficient way
of doing a recursive list.
2017-06-14 16:49:40 +01:00
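
A sketch of the shape this gives remotes; all names here are illustrative, not rclone's exact interfaces:

```go
package main

import "fmt"

// DirEntries stands in for rclone's slice of directory entries.
type DirEntries []string

// Lister is the one required method: list a single directory level.
type Lister interface {
	List(dir string) (DirEntries, error)
}

// ListRer is the optional recursive listing for remotes that can do
// it efficiently (e.g. bucket based remotes).
type ListRer interface {
	ListR(dir string, callback func(DirEntries) error) error
}

// listAll prefers ListR when available, else falls back to List
// (a real walker would recurse into the subdirectories it finds).
func listAll(f Lister, dir string, cb func(DirEntries) error) error {
	if r, ok := f.(ListRer); ok {
		return r.ListR(dir, cb)
	}
	entries, err := f.List(dir)
	if err != nil {
		return err
	}
	return cb(entries)
}

func main() { fmt.Println("sketch only") }
```
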
Nick Craig-Wood
6fc88ff32e Use --fast-list flag for sync/copy/move - fixes #1277
Redo test framework to take a -fast-list flag and test remotes with that flag.
2017-06-14 16:49:40 +01:00
Nick Craig-Wood
50928a5027 Implement --fast-list flag.
This is supported by remotes which can do a recursive listing.  It will
use more memory.

This is related to #1277 but doesn't fix that issue yet.
2017-06-14 16:49:40 +01:00
Nick Craig-Wood
3a431056e2 gcs, swift: increase directory listing chunk to 1000 to increase performance 2017-06-14 16:49:40 +01:00
Nick Craig-Wood
53c3e5f0ab Add placeholder support for ListR interface.
The ListR interface will be implemented by remotes that can do a
recursive directory listing more efficiently than just recursing
through the directories.  These include the bucket based remotes.
2017-06-14 16:49:40 +01:00
Nick Craig-Wood
0edb025257 Fixup tests with dirs vs bucket based fs 2017-06-14 16:49:40 +01:00
Nick Craig-Wood
fded4dbea2 yandex: correct error return for listing empty directory 2017-06-14 16:49:40 +01:00
Nick Craig-Wood
7e20e16cff core: Implement Walk directory listing and use in place of Lister
This is in preparation for removing the Lister code and replacing the
fundamental operation in the Fs with listing a single directory.
2017-06-14 16:49:40 +01:00
Nick Craig-Wood
1e88f0702a dropbox: fix oauth configuration
This was broken in c59a292719
2017-06-14 16:46:46 +01:00
Nick Craig-Wood
68333d34a1 dropbox: make setting mod time on existing files work properly
This is a fix left over from the v2 conversion.  Dropbox ignores the
client modification on an incoming file if it was identical to the
existing file.  This change deletes the existing file first before
re-uploading the new one.
2017-06-13 13:58:39 +01:00
Nick Craig-Wood
740b3f6ae2 Fix problems found with ineffassign 2017-06-13 11:52:36 +01:00
Nick Craig-Wood
28fcc53e45 mount test: retry umount as it fails occasionally
This is because of the background releasing of files which happens
after all the files are closed.
2017-06-13 10:52:10 +01:00
Nick Craig-Wood
2ca477c57f swift: make a sensible error if the user forgets the container - fixes #1470 2017-06-10 14:44:56 +01:00
Nick Craig-Wood
9a11d3efd9 Revert "Start Cat tests from 2 as onedrive doesn't support ranging from 1"
Now that https://github.com/OneDrive/onedrive-api-docs/issues/543 is
fixed, this can be reverted.

This reverts commit 320c53eab0.
2017-06-10 13:48:00 +01:00
Nick Craig-Wood
10d5377ed8 acd: remove revoked credentials, allow oauth proxy config and update docs 2017-06-10 12:02:34 +01:00
Nick Craig-Wood
ee14efd3c2 config: fix menu selection when no remotes 2017-06-10 11:39:40 +01:00
Nick Craig-Wood
b4be7d65a6 Update build to go1.8.3 2017-06-09 12:06:28 +01:00
Nick Craig-Wood
52e1bfae2a oauth: Allow auth_url and token_url to be set in the config file
If set in the config file, these override the ones configured into the
remote.  This enables alternative oauth servers to be used for all
oauth remotes.  This can only be altered by editing the config file
for the moment.
2017-06-08 20:35:32 +01:00
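
For example, a remote's section in the config file might gain entries like these (the remote name and URLs are hypothetical; auth_url and token_url are the keys the commit adds):

```
[myremote]
type = drive
auth_url = https://auth.example.com/oauth2/authorize
token_url = https://auth.example.com/oauth2/token
```
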
Nick Craig-Wood
9c1e703777 swift, b2, gcs, s3: Fix moveto and copyto
We now make sure the container/bucket is created before creating any objects.
2017-06-07 14:34:59 +01:00
Nick Craig-Wood
b49821956a Fix copyto/moveto test error (see #1261) 2017-06-07 14:08:46 +01:00
Nick Craig-Wood
a61ba1e7c4 moveto, copyto: report transfers and checks as per move and copy 2017-06-07 13:02:21 +01:00
Nick Craig-Wood
d30cc1e119 Factor RemoteSplit into fs 2017-06-07 12:27:33 +01:00
Nick Craig-Wood
74a3dfc4e1 Fix TestHashSums 2017-06-06 23:21:47 +01:00
Nick Craig-Wood
3fe9448229 drive, acd, onedrive: Cache the directory IDs when reading the parent directory
This makes directory listings much more efficient (one less
transaction needed) and also fixes #1439 (which was caused by having
to look up a directory name with quotes in it, which isn't dealt with well
by the list routine) by not doing a directory lookup at all.
2017-06-05 12:26:30 +01:00
Nick Craig-Wood
a5cfdfd233 drive: add team drive support - fixes #885 2017-06-04 22:38:29 +01:00
Nick Craig-Wood
bdc19b7c8a fstests: fix -remote flag to override test target 2017-06-04 22:38:29 +01:00
Nick Craig-Wood
e92cc8fe2b Add Edward Q. Bridges to contributors 2017-06-04 22:38:10 +01:00
Edward Q. Bridges
6ee4c62cae Add section on required IAM permissions.
cf.: https://github.com/ncw/rclone/issues/1455
2017-06-04 22:37:17 +01:00
Nick Craig-Wood
b047402294 config: Fix save of temp file under Windows - fixes #1458 2017-06-01 16:38:19 +01:00
Nick Craig-Wood
7693cecd17 Add Fabian Möller to contributors 2017-06-01 16:23:48 +01:00
Fabian Möller
558f014d43 migrate Gopkg.toml and Gopkg.lock to new format
Update Gopkg.toml and Gopkg.lock to follow the breaking changes
introduced by https://github.com/golang/dep/pull/644
2017-06-01 16:23:13 +01:00
Nick Craig-Wood
48508cb5b7 Add Ruwbin to contributors 2017-06-01 09:03:56 +01:00
Ruwbin
44c98e8654 fix docs typos 2017-06-01 09:03:19 +01:00
Stefan Breunig
9782c264e9 hand dirCacheTime through again 2017-06-01 09:02:22 +01:00
Stefan
9cede6b372 fully write new config file before moving to target location (fixes #1287)
* fully write new config file before moving to target location (fixes #1287)
* do not fail if there is no previous config; print temporary config path on failure
2017-06-01 08:57:10 +01:00
Stefan Breunig
decd960867 make moveto/copyto no-ops when source and destination are the same (fixes #1261) 2017-05-30 23:01:19 +01:00
Nick Craig-Wood
71028e0f06 dropbox/dbhash: fix errcheck warning 2017-05-30 22:08:49 +01:00
Nick Craig-Wood
52e96bc0e2 dropbox: add missing dbhashsum command
This was missed from 6381959850
2017-05-30 19:26:06 +01:00
Nick Craig-Wood
178ff62d6a vendor: add github.com/ncw/dropbox-sdk-go-unofficial and remove github.com/stacktic/dropbox
In due course this will become github.com/dropbox/dropbox-sdk-go-unofficial
when the fate of https://github.com/dropbox/dropbox-sdk-go-unofficial/pull/14
has been decided.
2017-05-30 15:49:29 +01:00
Nick Craig-Wood
9d335eb5cb dropbox: add low level retries 2017-05-30 14:49:09 +01:00
Nick Craig-Wood
20da3e6352 Add options to Put, PutUnchecked and Update, add HashOption and speed up local
* Add options to Put, PutUnchecked and Update for all Fses
  * Use these to create HashOption
  * Implement this in local
  * Pass the option in fs.Copy

This has the effect that we only calculate hashes we need to in the
local Fs which speeds up transfers significantly.
2017-05-29 12:04:52 +01:00
Nick Craig-Wood
6381959850 dropbox: support Dropbox content hashing scheme - fixes #1302
* add support to hashing module
  * add dbhashsum to list the hashes
  * add support to dropbox module

This means objects up and downloaded to/from Dropbox will have their
hashes checked.

Note after this change local objects are calculating MD5, SHA1 and
DBHASH which is excessive and needs to be fixed.
2017-05-29 12:04:44 +01:00
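
The scheme itself, sketched from Dropbox's published description: SHA-256 each 4 MiB block, then SHA-256 the concatenation of the per-block digests.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

const blockSize = 4 * 1024 * 1024 // Dropbox hashes in 4 MiB blocks

func contentHash(r io.Reader) (string, error) {
	overall := sha256.New()
	buf := make([]byte, blockSize)
	for {
		n, err := io.ReadFull(r, buf)
		if n > 0 {
			block := sha256.Sum256(buf[:n])
			overall.Write(block[:]) // hash of hashes
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			break
		}
		if err != nil {
			return "", err
		}
	}
	return hex.EncodeToString(overall.Sum(nil)), nil
}

func main() {
	sum, _ := contentHash(strings.NewReader("hello dropbox"))
	fmt.Println(sum)
}
```
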
Nick Craig-Wood
8916455e4f dropbox: implement dropbox hasher #1302 2017-05-29 12:04:34 +01:00
Nick Craig-Wood
8e214e838e dropbox: Update dropbox to use the v2 API #349
This is feature complete with the old version but now supports modification times.
2017-05-29 12:04:33 +01:00
Nick Craig-Wood
23acd3ce01 oauthutil: Don't expect tokens to have refresh URL 2017-05-29 12:04:33 +01:00
Stefan Breunig
a2e3af0523 poll for Google Drive changes when mounted 2017-05-28 17:54:52 +01:00
Nick Craig-Wood
5455d34f8c Fix ssh agent on Windows - fixes #1279 2017-05-26 10:21:07 +01:00
Nick Craig-Wood
84512ac77d vendor: add github.com/xanzy/ssh-agent for #1279 2017-05-26 10:21:06 +01:00
Nick Craig-Wood
1ec0327ed7 vendor: update cgofuse (because dep wanted to!) 2017-05-26 10:15:14 +01:00
Nick Craig-Wood
0f07b63fd1 ftp: convert the old config style to the new config style 2017-05-25 10:16:51 +01:00
Nick Craig-Wood
88ef475629 config: allow keys to be deleted from the config file 2017-05-25 10:15:22 +01:00
Sjur Fredriksen
ade61fa756 Updated FTP to follow SFTP standards, updated documentation 2017-05-25 09:30:15 +01:00
Nick Craig-Wood
cfc5f7bb2d Document another file to edit when making a remote 2017-05-25 09:28:18 +01:00
Nick Craig-Wood
ae9f8304fa Attempt to make async buffer test more reliable 2017-05-24 16:24:06 +01:00
Nick Craig-Wood
55755a8e5b Add Sjur Fredriksen to contributors 2017-05-24 15:59:49 +01:00
Sjur Fredriksen
080050fac2 Update ftp.md
Added information regarding non-standard FTP ports.
2017-05-24 15:59:18 +01:00
Nick Craig-Wood
a243ea6353 sftp: fix under Windows #1432
This was caused by erroneous use of filepath to parse unix standard paths
2017-05-24 15:39:17 +01:00
Nick Craig-Wood
51d2174c0b ftp: check connection before returning it to the pool #1435
If the last FTP command caused an error, and if the error wasn't a
regular FTP error code, then we check the connection is working using
a NOOP call before returning it to the connection pool.
2017-05-24 14:47:13 +01:00
Nick Craig-Wood
e75db0b14d Add Steven Lu to contributors 2017-05-24 08:44:42 +01:00
Steven Lu
c59a292719 Obtain a refresh token for GCD 2017-05-24 08:44:00 +01:00
Nick Craig-Wood
be5b8b8dff Add Bob Potter to contributors 2017-05-24 07:36:38 +01:00
Bob Potter
525220b14e Add --local-no-unicode-normalization flag
Fixes #1411
2017-05-24 07:36:06 +01:00
Nick Craig-Wood
a9d29c2264 ftp: don't pool the connection if file download failed 2017-05-19 17:45:22 +01:00
Nick Craig-Wood
8f54dc06a2 Use build tags to control when and where cmount is built 2017-05-19 17:08:04 +01:00
Nick Craig-Wood
7daf97f90a Add CircleCI badge to README 2017-05-19 16:06:43 +01:00
Nick Craig-Wood
2cae017738 mountlib: fix race condition in cache clear 2017-05-19 15:47:52 +01:00
Nick Craig-Wood
e172f00e0e ftp: fix errors from Close of a stream which hasn't been fully read 2017-05-19 12:28:47 +01:00
Nick Craig-Wood
412dacf8be Add a test for partial reads to all remotes 2017-05-19 12:28:47 +01:00
Nick Craig-Wood
cdacf026e4 ftp: implement server side move and directory move 2017-05-18 20:49:36 +01:00
Nick Craig-Wood
0ca6408580 ftp: rework mkdir to be more efficient 2017-05-18 20:49:36 +01:00
Nick Craig-Wood
9627a6142d ftp: support --contimeout 2017-05-18 20:49:36 +01:00
Nick Craig-Wood
6cc783f20b ftp: stop rmdir being recursive 2017-05-18 20:49:36 +01:00
Nick Craig-Wood
3136a75f4d ftp: add connection pool and remove excess locking 2017-05-18 20:49:36 +01:00
Nick Craig-Wood
a9101f8608 ftp: Fix for go1.6 and go1.7 2017-05-18 20:49:36 +01:00
Nick Craig-Wood
af043eda15 Vendor github.com/jlaffaye/ftp for ftp backend 2017-05-18 20:49:36 +01:00
Nick Craig-Wood
35c210d36f ftp: fix remaining issues to make tests work
* fix root
  * factor ftpConnection
  * fix path munging
  * fix recursive dir loops after update
  * use fs.Trace and comment out debugs
  * re-arrange and supplement docs
2017-05-18 20:49:36 +01:00
Nick Craig-Wood
3ed0440bd2 ftp: use path instead of filepath 2017-05-18 20:49:36 +01:00
Nick Craig-Wood
c13cff37ef ftp: replace URL parser with url.URL 2017-05-18 20:49:36 +01:00
Nick Craig-Wood
fce734662f ftp: fix golint/go vet/errchk errors and move methods into standard order 2017-05-18 20:49:36 +01:00
Nick Craig-Wood
e0ba1a2cd2 ftp: fix bitrot 2017-05-18 20:49:36 +01:00
Antonio Messina
c72fca2711 Add ftp backend - fixes #540 2017-05-18 20:49:36 +01:00
Nick Craig-Wood
ae17d88518 Add Bill Zissimopoulos to contributors 2017-05-18 20:48:47 +01:00
Bill Zissimopoulos
e19fc49a5f add circleci configuration 2017-05-18 20:45:08 +01:00
Bill Zissimopoulos
95c0378e3c update cgofuse dependency to v1.0.1 2017-05-18 20:45:08 +01:00
Nick Craig-Wood
7ee3cfd7c9 Add Igor Kharin to contributors 2017-05-15 21:03:16 +01:00
Igor Kharin
bd2cdeeeab sftp: specify HostKeyCallback in ClientConfig 2017-05-15 21:02:05 +01:00
Nick Craig-Wood
77cd93ef89 Fix tag to 8 digits of commit to make Appveyor and Travis consistent 2017-05-15 20:58:48 +01:00
Nick Craig-Wood
5b063679b5 travis: install libfuse for cmount build and disable on OS X 2017-05-15 17:41:16 +01:00
Nick Craig-Wood
09093a9954 Use appveyor to build the Windows beta releases 2017-05-15 17:41:16 +01:00
Nick Craig-Wood
df0cfa9735 Add -no-clean flag to cross-compile.go 2017-05-15 17:41:16 +01:00
Nick Craig-Wood
64d7489fd2 Add -include, -exclude -cgo to cross-compile.go 2017-05-15 17:41:16 +01:00
Nick Craig-Wood
ecedcd0e7f cmount: stop failing tests on Windows 2017-05-15 17:40:44 +01:00
Nick Craig-Wood
3dff91d691 mount: add missing build constraint to fix Windows build 2017-05-15 17:40:15 +01:00
Nick Craig-Wood
e131ef3714 Fix appveyor tests after vendor update 2017-05-15 16:56:47 +01:00
Nick Craig-Wood
ea0bc278ba cmount: Vendor github.com/billziss-gh/cgofuse 2017-05-15 16:56:47 +01:00
Nick Craig-Wood
b553c23d5b Automate production of zip files for Windows 2017-05-15 16:56:47 +01:00
Nick Craig-Wood
4f954896a8 appveyor: make build include WinFsp and test cmount 2017-05-15 16:56:47 +01:00
Nick Craig-Wood
b259f8b752 cmount, mount, mountlib: make --read-only reject modify operations
Normally mount/cmount use `-o ro` to get the kernel to mark the fs as
read only.  However this is ignored by WinFsp, so in addition if
`--read-only` is in effect then return EROFS ("Read only File System")
from all methods which attempt to modify something.
2017-05-15 16:56:47 +01:00
Nick Craig-Wood
8be8a8e41b mountlib: on read only open of file, make open pending until first read
This fixes a problem with Windows which seems fond of opening files
just to read their attributes and closing them again.
2017-05-15 16:56:47 +01:00
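
A small sketch of the lazy-open idea (invented types, not the real mountlib API): open merely records the request, and the expensive remote open is deferred until the first Read, so attribute-only open/close cycles cost nothing.

```
package main

import "fmt"

// lazyFile defers the real (remote) open until data is actually read.
type lazyFile struct {
	name   string
	opened bool
}

// ensureOpen performs the real open exactly once, on first use.
func (f *lazyFile) ensureOpen() {
	if !f.opened {
		fmt.Println("opening remote file:", f.name) // the expensive part
		f.opened = true
	}
}

// Read triggers the pending open before returning any data.
func (f *lazyFile) Read(p []byte) (int, error) {
	f.ensureOpen()
	return 0, nil // toy: no real data
}

// Close is cheap if nothing was ever read.
func (f *lazyFile) Close() error { return nil }

func main() {
	probe := &lazyFile{name: "a.txt"}
	probe.Close() // Windows-style attribute probe: remote never touched

	f := &lazyFile{name: "b.txt"}
	buf := make([]byte, 16)
	f.Read(buf) // first read triggers the real open
	f.Close()
}
```
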
Nick Craig-Wood
79aa060e21 win-build.bat example bat file for building with WinFsp under Windows 2017-05-15 16:56:46 +01:00
Nick Craig-Wood
f9500729b7 mountlib: fix cross platform tests 2017-05-15 16:56:46 +01:00
Nick Craig-Wood
204a19e67f cmount: Wait for mountpoint to appear on Windows before declaring mounted 2017-05-15 16:56:46 +01:00
Nick Craig-Wood
e6ffe3464c cmount: check for filesystem blowing up before Init is called 2017-05-15 16:56:46 +01:00
Nick Craig-Wood
0384364c3e cmount: pass --FileSystemName under windows 2017-05-15 16:56:46 +01:00
Nick Craig-Wood
763facfd78 cmount: implement --fuse-flag to pass commands to fuse library directly
Useful for `--fuse-flag -h` to see exactly which options the library supports.
2017-05-15 16:56:46 +01:00
Nick Craig-Wood
bc88f1dafa cmount: fix openFile leak 2017-05-15 16:56:46 +01:00
Nick Craig-Wood
0c055a1215 cmount: Statfs: reduce max size of volume for Windows 2017-05-15 16:56:46 +01:00
Nick Craig-Wood
938d7951ab cmount: allow extra options to pass to fuse with -o 2017-05-15 16:56:45 +01:00
Nick Craig-Wood
b4466bd9b1 Add -o uid=-1 -o gid=-1 for Windows/WinFsp 2017-05-15 16:56:45 +01:00
Nick Craig-Wood
31f76aa464 cmount: implement no-ops for Fsync, Chmod, Chown, Access, Fsyncdir and stop using fuse.FileSystemBase 2017-05-15 16:56:45 +01:00
Nick Craig-Wood
c887c164dc cmount: add function tracing 2017-05-15 16:56:45 +01:00
Nick Craig-Wood
115ac00222 mount, mountlib: move function tracing into mount 2017-05-15 16:56:45 +01:00
Nick Craig-Wood
50e79bc087 fs: Implement fs.Trace for tracing entry and exit of functions 2017-05-15 16:56:45 +01:00
Nick Craig-Wood
abda616f84 mountlib: make Nodes also be fmt.Stringer so they debug nicely 2017-05-15 16:56:45 +01:00
Nick Craig-Wood
9c3048580a cmount: fix code quality warnings 2017-05-15 16:56:45 +01:00
Nick Craig-Wood
c1d5faa32a mountlib: fix code quality warnings 2017-05-15 16:56:45 +01:00
Nick Craig-Wood
d127d8686a mountlib: pass options in fsys not as args 2017-05-15 16:56:44 +01:00
Nick Craig-Wood
bc9856b570 Forward port 930ff266f2 to cmount branch
compare checksums on upload/download via FUSE
2017-05-15 16:56:44 +01:00
Nick Craig-Wood
855071cc19 cmount: name the command mount under windows and cmount under linux 2017-05-15 16:56:44 +01:00
Nick Craig-Wood
b179540e80 cmount: fix Getattr to work on directories 2017-05-15 16:56:44 +01:00
Nick Craig-Wood
6a8e4690d3 mountlib: windows fixes for drive letter and timing 2017-05-15 16:56:44 +01:00
Nick Craig-Wood
917ea6ac57 mountlib: make tests work under all platforms 2017-05-15 16:56:44 +01:00
Nick Craig-Wood
7b47a1e842 cmount: set the correct values for uid, gid under Windows 2017-05-15 16:56:44 +01:00
Nick Craig-Wood
bcd87009e2 Fix docs typo 2017-05-15 16:56:44 +01:00
Nick Craig-Wood
caf85737c3 cmount: fix Windows compile (thanks Bill Zissimopoulos) 2017-05-15 16:56:44 +01:00
Nick Craig-Wood
e1516e0159 Forward port 58a82cd578 into cmount branch
allow the fuse directory cached to be cleaned manually
2017-05-15 16:56:43 +01:00
Nick Craig-Wood
ee1111e4c9 cmount: a new mount option based on cgofuse.
This, with the aid of WinFsp, should work on Windows.

Unfinished bits
  * 1 test doesn't pass
  * docs
  * build
2017-05-15 16:56:43 +01:00
Nick Craig-Wood
268fe0004c mount: factor filesystem code into mountlib and mounttest 2017-05-12 21:24:24 +01:00
Nick Craig-Wood
0c92a64bb3 vendor: update spf13/cobra to fix arg parsing 2017-05-12 19:49:32 +01:00
Nick Craig-Wood
8b61692754 vendor: update github.com/aws/aws-sdk-go to get plan9 build fix 2017-05-12 14:24:51 +01:00
Nick Craig-Wood
663e6f3ec0 vendor: patch github.com/aws/aws-sdk-go to fix the build
Temporary until https://github.com/aws/aws-sdk-go/pull/1262 is merged.
2017-05-11 17:11:35 +01:00
Nick Craig-Wood
17633f5460 Require go1.6 for building rclone
This is required because google.golang.org/grpc needs it.
2017-05-11 17:07:49 +01:00
Nick Craig-Wood
98c2d2c41b Switch to using the dep tool and update all the dependencies 2017-05-11 15:39:54 +01:00
Nick Craig-Wood
5135ff73cb Compile 386 builds with "GO386=387" for maximum compatibility #437 2017-05-09 11:58:29 +01:00
Stefan Breunig
58a82cd578 allow the fuse directory cached to be cleaned manually (fixes #803) 2017-05-07 12:08:59 +01:00
Nick Craig-Wood
d86ea8623b Add Yoni Jah second email to contributors 2017-05-02 22:54:11 +01:00
Yoni Jah
cdeeff988e Added RepetableReader to fs. Used in OneDrive with io.LimitedReader to display accurate speed 2017-05-02 22:31:05 +01:00
Stefan Breunig
930ff266f2 compare checksums on upload/download via FUSE 2017-05-02 22:27:38 +01:00
Nick Craig-Wood
d5c0fe632f Add Zahiar Ahmed to contributors 2017-05-02 22:16:16 +01:00
Zahiar Ahmed
3c5c5eeec2 Add us-east-2 (Ohio) and eu-west-2 (London) S3 regions 2017-05-02 22:07:50 +01:00
Martin Kristensen
56f017c60c drive: use explicit fields for all endpoints
Reuses the same fields for all endpoints for simplicity's sake.
Should solve remaining part of #1346
2017-05-02 21:30:45 +01:00
Nick Craig-Wood
b6517840ca Update build to go 1.8.1 2017-04-25 08:10:36 +01:00
Nick Craig-Wood
1ccfea5aa9 Add Anisse Astier to contributors 2017-04-25 08:08:33 +01:00
Anisse Astier
7e858f4b8d dropbox: typo
dropbix -> dropbox.
2017-04-25 08:07:37 +01:00
Martin Kristensen
7b4f368307 acd: fix typo in log message for temp link download 2017-04-25 08:07:00 +01:00
Nick Craig-Wood
06a3502ed8 Script to update authors.md automatically from the git changelog 2017-04-24 20:36:06 +01:00
Nick Craig-Wood
a9a43144ca Add Too Much IO to contributors 2017-04-24 20:33:51 +01:00
Martin Kristensen
dd968a8ccf drive: nextPageToken field was missing
Fixes the bug found by users in #1346
2017-04-24 19:50:51 +01:00
Martin Kristensen
0d6e1afe54 drive: only request owner field when using --drive-auth-owner-only
This fixes the note @ncw made in #1359
2017-04-24 10:35:42 +01:00
Nick Craig-Wood
7d9faffd4b Add Martin Kristensen to contributors 2017-04-23 17:03:20 +01:00
Martin Kristensen
d7df065320 drive: reduce bandwidth by adding fields for partial responses
Fixes #1346
2017-04-23 17:01:15 +01:00
Michael Ledin
84d4d7f9d9 oauthutil: Print redirection URI if using own credentials. 2017-04-22 10:37:46 +01:00
Nick Craig-Wood
733d6fe56c Add Michael Ledin to contributors 2017-04-22 10:24:33 +01:00
Michael Ledin
8350544092 onedrive: swap to using http://localhost:53682/ as redirect URL.
The previous redirect URL http://localhost.rclone.org:53682/ can't be
used any more in new OneDrive authentication, which is a problem for
users trying to make their own credentials.
2017-04-22 10:08:18 +01:00
Nick Craig-Wood
6a63bc2788 Add Hraban Luyat to contributors 2017-04-22 09:39:46 +01:00
Hraban Luyat
66e8c1600e Print password prompts to stderr
This makes rclone with encrypted config better suited for use in
pipelines. E.g.:

$ rclone lsl mydrive:Some/Dir | sort -k 4

If the password prompt ("Enter configuration password") is printed to
stdout, it will be swallowed by sort. By printing it to stderr, you
still see the prompt, without sacrificing compatibility with the unix
pipeline.
2017-04-22 09:38:39 +01:00
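
The gist as a tiny standalone Go sketch (not the actual config code): the prompt is written to os.Stderr, so stdout carries only the listing that the next command in the pipe consumes.

```
package main

import (
	"bufio"
	"fmt"
	"os"
)

func main() {
	// Prompt on stderr: still visible on the terminal, invisible to `sort`.
	fmt.Fprint(os.Stderr, "Enter configuration password: ")

	password, _ := bufio.NewReader(os.Stdin).ReadString('\n')
	_ = password // a real program would decrypt the config with this

	// Only the real output goes to stdout for the pipeline.
	fmt.Println("   42 2017-04-22 09:38:39 Some/Dir/file.txt")
}
```
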
Stephen Harris
82b8d68ffb crypt: report the name:root as specified by the user
Rather than the underlying Fs root (which may be encrypted when
filename_encryption is set).

Fixes #1305
2017-04-22 09:28:05 +01:00
Nick Craig-Wood
b86bbcd67e Add Jon Craton to contributors 2017-04-22 09:22:51 +01:00
Jon Craton
38b6d607aa fixed typo 2017-04-22 09:21:44 +01:00
Stephen Harris
e1647a5a08 crypt: Fix obfuscate filename encryption method
Fix issue #1315 where filenames calculated with a base distance of zero
(i.e. the characters add up to 0 (mod 256)) aren't de-obfuscated on reading.
This was due to overloading of "0" to also mean "invalid UTF8; no rotation",
so we remove that double meaning
2017-04-22 09:16:00 +01:00
Nick Craig-Wood
bc25190fc7 Fix misleading log message with --dry-run - fixes #1309 2017-04-10 16:07:22 +01:00
Yoni Jah
e3a41321cc onedrive: changed QueryEscape to PathEscape - fixes #1296 2017-04-10 15:46:15 +01:00
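
The distinction behind the fix, shown with the real net/url functions from the standard library: QueryEscape encodes for query strings (a space becomes "+") while PathEscape encodes for path segments (a space becomes "%20"), so QueryEscape applied to a path yields names the server decodes wrongly.

```
package main

import (
	"fmt"
	"net/url"
)

func main() {
	name := "my file.txt"
	fmt.Println(url.QueryEscape(name)) // "my+file.txt"   - wrong inside a path
	fmt.Println(url.PathEscape(name))  // "my%20file.txt" - right inside a path
}
```
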
Stefan Breunig
2fd86c93fc allow modTime to be changed even before all writers are closed (fixes #1197 -- again) 2017-03-31 01:28:08 +02:00
Nick Craig-Wood
2b8c461e04 Add Ihor Dvoretskyi to contributors 2017-03-29 18:12:13 +01:00
Ihor Dvoretskyi
a54692d165 OneDrive vs One Drive
It's better to call this service by its official name.
2017-03-29 18:11:33 +01:00
Nick Craig-Wood
4b4c59a4bb crypt: add integration tests for obfuscate name encryption 2017-03-29 17:57:10 +01:00
Nick Craig-Wood
81d688107e Add Stephen Harris to contributors 2017-03-29 17:57:03 +01:00
Stephen Harris
6e003934fc crypt: add an "obfuscate" option for filename encryption.
This is a simple "rotate" of the filename, with each file having a rot
distance based on the filename.  We store the distance at the beginning
of the filename.  So a file called "go" would become "37.KS".

This is not a strong encryption of filenames, but it should stop automated
scanning tools from picking up on filename patterns.  As such it's an
intermediate between "off" and "standard".  The advantage is that it
allows for longer path segment names.

We use the nameKey as an additional input to calculate the obfuscation
distance.  This should mean that two different passwords will result
in two different keys.

The obfuscation rotation works by splitting the ranges up and handling these cases:
  0-9
  A-Za-z
  0xA0-0xFF
  and anything greater in blocks of 256
2017-03-29 17:56:55 +01:00
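
A heavily simplified toy sketch of the scheme (not the actual crypt implementation, which also mixes in the nameKey and handles the 0xA0-0xFF and 256-block ranges): derive a distance from the name, store it as a prefix, and rotate each character within its own range.

```
package main

import "fmt"

// distance derives the rotation amount from the filename itself.
func distance(name string) int {
	d := 0
	for _, c := range name {
		d += int(c)
	}
	return d % 256
}

// rotate shifts c within [lo, hi], wrapping around.
func rotate(c byte, lo, hi byte, dist int) byte {
	span := int(hi-lo) + 1
	return lo + byte((int(c-lo)+dist)%span)
}

// obfuscate stores the distance as a prefix, then rotates the name.
func obfuscate(name string) string {
	dist := distance(name)
	out := make([]byte, len(name))
	for i := 0; i < len(name); i++ {
		c := name[i]
		switch {
		case c >= '0' && c <= '9':
			out[i] = rotate(c, '0', '9', dist)
		case c >= 'A' && c <= 'Z':
			out[i] = rotate(c, 'A', 'Z', dist)
		case c >= 'a' && c <= 'z':
			out[i] = rotate(c, 'a', 'z', dist)
		default:
			out[i] = c // toy: leave everything else alone
		}
	}
	return fmt.Sprintf("%d.%s", dist, out)
}

func main() {
	fmt.Println(obfuscate("go")) // "214.mu" with this toy scheme
}
```
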
Dedsec1
37e1b20ec1 Updated .pkgr.yml file to use rclone as its own cli. 2017-03-29 17:48:53 +01:00
Nick Craig-Wood
d1787b50fd Add Yoni Jah to contributors 2017-03-29 17:38:14 +01:00
Yoni Jah
9dfc346998 onedrive: Retry on token expired error, reset upload body on retry 2017-03-29 17:38:07 +01:00
Nick Craig-Wood
9ab4c19945 Add Danny Tsai to contributors 2017-03-29 17:26:03 +01:00
Danny Tsai
3bab119fa5 drive: implement --drive-shared-with-me flag to view shared with me files 2017-03-29 17:23:30 +01:00
Nick Craig-Wood
1fdf3e2aae Add Marvin Watson to contributors 2017-03-29 17:12:17 +01:00
marvwatson
4810aa65a4 Update references from HTTP to HTTPS where possible 2017-03-29 05:38:34 -07:00
Nick Craig-Wood
f798552cf1 Update urls to https after site move 2017-03-29 10:06:22 +01:00
Stefan Breunig
4dc030d081 implement ModTime via FUSE for remotes that support it (fixes #1197) 2017-03-24 09:23:04 +01:00
Nick Craig-Wood
216499d78b Add Mike Tesch to authors 2017-03-19 08:26:41 +00:00
Mike Tesch
60f636ee15 Fix spelling of Unfortunately 2017-03-18 20:22:19 -04:00
Nick Craig-Wood
f0bf117a04 Add Jérôme Vizcaino to authors 2017-03-18 21:24:05 +00:00
Jérôme Vizcaino
788b6ce821 mount: umount dir when program ends with SIGINT (Ctrl+C) or SIGTERM 2017-03-18 21:24:05 +00:00
Nick Craig-Wood
503cd84919 Start v1.36-DEV development 2017-03-18 11:30:59 +00:00
12414 changed files with 8531246 additions and 114907 deletions

.appveyor.yml (new file, 46 lines)

@@ -0,0 +1,46 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
environment:
GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
ORIGPATH: '%PATH%'
NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
PATH: '%PATHCC64%'
RCLONE_CONFIG_PASS:
secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=
install:
- choco install winfsp -y
- choco install zip -y
- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
build_script:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go install
- go build
- make log_since_last_release > %TEMP%\git-log.txt
- make version > %TEMP%\version
- set /p RCLONE_VERSION=<%TEMP%\version
- set PATH=%PATHCC32%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
- set PATH=%PATHCC64%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%
test_script:
- make GOTAGS=cmount quicktest
artifacts:
- path: rclone.exe
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload

.circleci/config.yml (new file, 34 lines)

@@ -0,0 +1,34 @@
version: 2
jobs:
build:
machine: true
working_directory: ~/.go_workspace/src/github.com/ncw/rclone
steps:
- checkout
- run:
name: Cross-compile rclone
command: |
docker pull billziss/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=billziss/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
xgo \
--targets=android/*,ios/* \
.
- run:
name: Prepare artifacts
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
- store_artifacts:
path: /tmp/rclone.dist

.gitignore (vendored, 2 lines changed)

@@ -3,3 +3,5 @@ _junk/
rclone
build
docs/public
rclone.iml
.idea

.gometalinter.json (new file, 14 lines)

@@ -0,0 +1,14 @@
{
"Enable": [
"deadcode",
"errcheck",
"goimports",
"golint",
"ineffassign",
"structcheck",
"varcheck",
"vet"
],
"EnableGC": true,
"Vendor": true
}

.pkgr.yml

@@ -1,2 +1,2 @@
default_dependencies: false
cli: false
cli: rclone

.travis.yml

@@ -1,14 +1,17 @@
language: go
sudo: false
osx_image: xcode7.3
sudo: required
dist: trusty
os:
- linux
go:
- 1.5.4
- 1.6.4
- 1.7.4
- 1.8
- 1.7.6
- 1.8.7
- 1.9.3
- "1.10.1"
- tip
before_install:
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
install:
- git fetch --unshallow --tags
- make vars
@@ -16,19 +19,32 @@ install:
script:
- make check
- make quicktest
- make compile_all
env:
matrix:
secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
global:
- GOTAGS=cmount
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
- secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
apt:
packages:
- fuse
- libfuse-dev
- rpm
- pkg-config
matrix:
allow_failures:
- go: tip
include:
- os: osx
go: 1.8
go: "1.10.1"
env: GOTAGS=""
deploy:
provider: script
script: make travis_beta
skip_cleanup: true
on:
branch: master
go: 1.8
condition: "`uname` == 'Linux'"
all_branches: true
go: "1.10.1"
condition: $TRAVIS_PULL_REQUEST == false

CONTRIBUTING.md

@@ -10,7 +10,7 @@ of filing an issue.
When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](http://beta.rclone.org/):
with the [latest beta of rclone](https://beta.rclone.org/):
* Rclone version (eg output from `rclone -V`)
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
@@ -100,21 +100,74 @@ need to make a remote called `TestDrive`.
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.
cd drive
cd backend/drive
go test -v
You can then run the integration tests which tests all of rclone's
operations. Normally these get run against the local filing system,
but they can be run against any of the remotes.
cd ../fs
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -subdir
If you want to run all the integration tests against all the remotes,
then run in that directory
cd fs/operations
go test -v -remote TestDrive:
go run test_all.go
If you want to run all the integration tests against all the remotes,
then change into the project root and run
make test
This command is run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/
## Code Organisation ##
Rclone code is organised into a small number of top level directories
with modules beneath.
* backend - the rclone backends for interfacing to cloud providers -
* all - import this to load all the cloud providers
* ...providers
* bin - scripts for use while building or maintaining rclone
* cmd - the rclone commands
* all - import this to load all the commands
* ...commands
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* fs - main rclone definitions - minimal amount of code
* accounting - bandwidth limiting and statistics
* asyncreader - an io.Reader which reads ahead
* config - manage the config file and flags
* driveletter - detect if a name is a drive letter
* filter - implements include/exclude filtering
* fserrors - rclone specific error handling
* fshttp - http handling for rclone
* fspath - path handling for rclone
* hash - defines rclone's hash types and functions
* list - list a remote
* log - logging facilities
* march - iterates directories in lock step
* object - in memory Fs objects
* operations - primitives for sync, eg Copy, Move
* sync - sync directories
* walk - walk a directory
* fstest - provides integration test framework
* fstests - integration tests for the backends
* mockdir - mocks an fs.Directory
* mockobject - mocks an fs.Object
* test_all - Runs integration tests for everything
* graphics - the images used in the website etc
* lib - libraries used by the backend
* atexit - register functions to run when rclone exits
* dircache - directory ID to name caching
* oauthutil - helpers for using oauth
* pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST
* vendor - 3rd party code managed by the dep tool
* vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation ##
@@ -139,15 +192,93 @@ Documentation for rclone sub commands is with their code, eg
There are separate instructions for making a release in the RELEASE.md
file.
## Updating the vendor directory ##
## Commit messages ##
Do these commands to update the entire build directory to the latest
version of all the dependencies. This should be done early in the
release cycle. Individual dependencies can be added with `godep get`.
Please make the first line of your commit message a summary of the
change, and prefix it with the directory of the change followed by a
colon. The changelog gets made by looking at just these first lines
so make it good!
If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed.
If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the
change will get linked into the issue.
Here is an example of a short commit message:
```
drive: add team drive support - fixes #885
```
And here is an example of a longer one:
```
mount: fix hang on errored upload
In certain circumstances if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.
Fixes #1498
```
## Adding a dependency ##
rclone uses the [dep](https://github.com/golang/dep) tool to manage
its dependencies. All code that rclone needs for building is stored
in the `vendor` directory for perfectly reproducible builds.
The `vendor` directory is entirely managed by the `dep` tool.
To add a new dependency, run `dep ensure` and `dep` will pull in the
new dependency to the `vendor` directory and update the `Gopkg.lock`
file.
You can add constraints on that package in the `Gopkg.toml` file (see
the `dep` documentation), but don't unless you really need to.
Please check in the changes generated by `dep` including the `vendor`
directory and `Gopkg.toml` and `Gopkg.lock` in a single commit
separate from any other code changes. Watch out for new files in
`vendor`.
## Updating a dependency ##
If you need to update a dependency then run
dep ensure -update github.com/pkg/errors
Check in the changes in a single commit as above.
## Updating all the dependencies ##
In order to update all the dependencies then run `make update`. This
just runs `dep ensure -update`. Check in the changes in a single
commit as above.
This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.
## Updating a backend ##
If you update a backend then please run the unit tests and the
integration tests for that backend.
Assuming the backend is called `remote`, create a config entry
called `TestRemote` for the tests to use.
Now `cd remote` and run `go test -v` to run the unit tests.
Then `cd fs` and run `go test -v -remote TestRemote:` to run the
integration tests.
The next section goes into more detail about the tests.
* make build_dep
* make update
## Writing a new backend ##
Choose a name. The docs here will use `remote` as an example.
@@ -162,25 +293,35 @@ Research
Getting going
* Create `remote/remote.go` (copy this from a similar fs)
* Add your fs to the imports in `fs/all/all.go`
* Create `backend/remote/remote.go` (copy this from a similar remote)
* box is a good one to start from if you have a directory based remote
* b2 is a good one to start from if you have a bucket based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
Unit tests
* Create a config entry called `TestRemote` for the unit tests to use
* Add your fs to the end of `fstest/fstests/gen_tests.go`
* generate `remote/remote_test.go` unit tests `cd fstest/fstests; go generate`
* Create a `backend/remote/remote_test.go` - copy and adjust your example remote
* Make sure all tests pass with `go test -v`
Integration tests
* Add your fs to `fs/test_all.go`
* Add your fs to `fstest/test_all/test_all.go`
* Make sure integration tests pass with
* `cd fs`
* `go test -v -remote TestRemote:` and
* `cd fs/operations`
* `go test -v -remote TestRemote:`
* `cd fs/sync`
* `go test -v -remote TestRemote:`
* If you are making a bucket based remote, then check with this also
* `go test -v -remote TestRemote: -subdir`
* And if your remote defines `ListR` this also
* `go test -v -remote TestRemote: -fast-list`
Add your fs to the docs
See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
* `README.md` - main Github page
* `docs/content/remote.md` - main docs page
@@ -189,3 +330,4 @@ Add your fs to the docs
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
* `cmd/cmd.go` - the main help for rclone

Godeps/Godeps.json (generated, deleted, 513 lines)

@@ -1,513 +0,0 @@
{
"ImportPath": "github.com/ncw/rclone",
"GoVersion": "go1.7",
"GodepVersion": "v75",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "bazil.org/fuse",
"Rev": "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
},
{
"ImportPath": "bazil.org/fuse/fs",
"Rev": "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
},
{
"ImportPath": "bazil.org/fuse/fuseutil",
"Rev": "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
},
{
"ImportPath": "cloud.google.com/go/compute/metadata",
"Comment": "v0.6.0-68-g0b87d14",
"Rev": "0b87d14d90086b53a97dfbd66f3000f7f112b494"
},
{
"ImportPath": "cloud.google.com/go/internal",
"Comment": "v0.6.0-68-g0b87d14",
"Rev": "0b87d14d90086b53a97dfbd66f3000f7f112b494"
},
{
"ImportPath": "github.com/Unknwon/goconfig",
"Rev": "87a46d97951ee1ea20ed3b24c25646a79e87ba5d"
},
{
"ImportPath": "github.com/VividCortex/ewma",
"Comment": "v1.0-20-gc595cd8",
"Rev": "c595cd886c223c6c28fc9ae2727a61b5e4693d85"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/endpoints",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/request",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/session",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3iface",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3manager",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sts",
"Comment": "v1.6.24-1-g2d3b3bc",
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
},
{
"ImportPath": "github.com/cpuguy83/go-md2man/md2man",
"Comment": "v1.0.6",
"Rev": "a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa"
},
{
"ImportPath": "github.com/davecgh/go-spew/spew",
"Comment": "v1.1.0",
"Rev": "346938d642f2ec3594ed81d874461961cd0faa76"
},
{
"ImportPath": "github.com/go-ini/ini",
"Comment": "v1.24.0-2-gee900ca",
"Rev": "ee900ca565931451fe4e4409bcbd4316331cec1c"
},
{
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "8ee79997227bf9b34611aee7946ae64735e6fd93"
},
{
"ImportPath": "github.com/google/go-querystring/query",
"Rev": "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
},
{
"ImportPath": "github.com/googleapis/gax-go",
"Rev": "da06d194a00e19ce00d9011a13931c3f6f6887c7"
},
{
"ImportPath": "github.com/inconshreveable/mousetrap",
"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
},
{
"ImportPath": "github.com/jmespath/go-jmespath",
"Comment": "0.2.2-14-gbd40a43",
"Rev": "bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d"
},
{
"ImportPath": "github.com/kr/fs",
"Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
},
{
"ImportPath": "github.com/ncw/go-acd",
"Rev": "7954f1fad2bda6a7836999003e4481d6e32edc1e"
},
{
"ImportPath": "github.com/ncw/swift",
"Rev": "6c1b1510538e1f00d49a558b7b9b87d71bc454d6"
},
{
"ImportPath": "github.com/pkg/errors",
"Comment": "v0.8.0-2-g248dadf",
"Rev": "248dadf4e9068a0b3e79f02ed0a610d935de5302"
},
{
"ImportPath": "github.com/pkg/sftp",
"Rev": "ff7e52ffd762466ebd2c4e710d5436dccc539f54"
},
{
"ImportPath": "github.com/pmezard/go-difflib/difflib",
"Comment": "v1.0.0",
"Rev": "792786c7400a136282c1664665ae0a8db921c6c2"
},
{
"ImportPath": "github.com/rfjakob/eme",
"Comment": "v1.0-15-gfd00240",
"Rev": "fd00240838d2e0fe6b2c58bf5b27db843d828ad5"
},
{
"ImportPath": "github.com/russross/blackfriday",
"Comment": "v1.4-40-g5f33e7b",
"Rev": "5f33e7b7878355cd2b7e6b8eefc48a5472c69f70"
},
{
"ImportPath": "github.com/shurcooL/sanitized_anchor_name",
"Rev": "1dba4b3954bc059efc3991ec364f9f9a35f597d2"
},
{
"ImportPath": "github.com/skratchdot/open-golang/open",
"Rev": "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
},
{
"ImportPath": "github.com/spf13/cobra",
"Rev": "b5d8e8f46a2f829f755b6e33b454e25c61c935e1"
},
{
"ImportPath": "github.com/spf13/cobra/doc",
"Rev": "b5d8e8f46a2f829f755b6e33b454e25c61c935e1"
},
{
"ImportPath": "github.com/spf13/pflag",
"Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
},
{
"ImportPath": "github.com/stacktic/dropbox",
"Rev": "58f839b21094d5e0af7caf613599830589233d20"
},
{
"ImportPath": "github.com/stretchr/testify/assert",
"Comment": "v1.1.4-27-g4d4bfba",
"Rev": "4d4bfba8f1d1027c4fdbe371823030df51419987"
},
{
"ImportPath": "github.com/stretchr/testify/require",
"Comment": "v1.1.4-27-g4d4bfba",
"Rev": "4d4bfba8f1d1027c4fdbe371823030df51419987"
},
{
"ImportPath": "github.com/tsenart/tb",
"Rev": "19f4c3d79d2bd67d0911b2e310b999eeea4454c1"
},
{
"ImportPath": "golang.org/x/crypto/curve25519",
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
},
{
"ImportPath": "golang.org/x/crypto/ed25519",
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
},
{
"ImportPath": "golang.org/x/crypto/ed25519/internal/edwards25519",
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
},
{
"ImportPath": "golang.org/x/crypto/nacl/secretbox",
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
},
{
"ImportPath": "golang.org/x/crypto/pbkdf2",
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
},
{
"ImportPath": "golang.org/x/crypto/poly1305",
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
},
{
"ImportPath": "golang.org/x/crypto/salsa20/salsa",
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
},
{
"ImportPath": "golang.org/x/crypto/scrypt",
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
},
{
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
},
{
"ImportPath": "golang.org/x/crypto/ssh/agent",
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
},
{
"ImportPath": "golang.org/x/crypto/ssh/terminal",
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
},
{
"ImportPath": "golang.org/x/net/context/ctxhttp",
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
},
{
"ImportPath": "golang.org/x/net/http2",
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
},
{
"ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
},
{
"ImportPath": "golang.org/x/net/idna",
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
},
{
"ImportPath": "golang.org/x/net/internal/timeseries",
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
},
{
"ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
},
{
"ImportPath": "golang.org/x/net/trace",
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
},
{
"ImportPath": "golang.org/x/oauth2",
"Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
},
{
"ImportPath": "golang.org/x/oauth2/google",
"Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
},
{
"ImportPath": "golang.org/x/oauth2/internal",
"Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
},
{
"ImportPath": "golang.org/x/oauth2/jws",
"Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
},
{
"ImportPath": "golang.org/x/oauth2/jwt",
"Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
},
{
"ImportPath": "golang.org/x/sys/unix",
"Rev": "075e574b89e4c2d22f2286a7e2b919519c6f3547"
},
{
"ImportPath": "golang.org/x/text/transform",
"Rev": "85c29909967d7f171f821e7a42e7b7af76fb9598"
},
{
"ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "85c29909967d7f171f821e7a42e7b7af76fb9598"
},
{
"ImportPath": "google.golang.org/api/drive/v2",
"Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
},
{
"ImportPath": "google.golang.org/api/gensupport",
"Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
},
{
"ImportPath": "google.golang.org/api/googleapi",
"Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
},
{
"ImportPath": "google.golang.org/api/googleapi/internal/uritemplates",
"Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
},
{
"ImportPath": "google.golang.org/api/storage/v1",
"Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
},
{
"ImportPath": "google.golang.org/appengine",
"Comment": "v1.0.0-28-g2e4a801",
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
},
{
"ImportPath": "google.golang.org/appengine/internal",
"Comment": "v1.0.0-28-g2e4a801",
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
},
{
"ImportPath": "google.golang.org/appengine/internal/app_identity",
"Comment": "v1.0.0-28-g2e4a801",
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
},
{
"ImportPath": "google.golang.org/appengine/internal/base",
"Comment": "v1.0.0-28-g2e4a801",
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
},
{
"ImportPath": "google.golang.org/appengine/internal/datastore",
"Comment": "v1.0.0-28-g2e4a801",
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
},
{
"ImportPath": "google.golang.org/appengine/internal/log",
"Comment": "v1.0.0-28-g2e4a801",
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
},
{
"ImportPath": "google.golang.org/appengine/internal/modules",
"Comment": "v1.0.0-28-g2e4a801",
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
},
{
"ImportPath": "google.golang.org/appengine/internal/remote_api",
"Comment": "v1.0.0-28-g2e4a801",
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
},
{
"ImportPath": "google.golang.org/grpc",
"Comment": "v1.0.5-52-gd0c32ee",
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
},
{
"ImportPath": "google.golang.org/grpc/codes",
"Comment": "v1.0.5-52-gd0c32ee",
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
},
{
"ImportPath": "google.golang.org/grpc/credentials",
"Comment": "v1.0.5-52-gd0c32ee",
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
},
{
"ImportPath": "google.golang.org/grpc/grpclog",
"Comment": "v1.0.5-52-gd0c32ee",
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
},
{
"ImportPath": "google.golang.org/grpc/internal",
"Comment": "v1.0.5-52-gd0c32ee",
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
},
{
"ImportPath": "google.golang.org/grpc/metadata",
"Comment": "v1.0.5-52-gd0c32ee",
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
},
{
"ImportPath": "google.golang.org/grpc/naming",
"Comment": "v1.0.5-52-gd0c32ee",
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
},
{
"ImportPath": "google.golang.org/grpc/peer",
"Comment": "v1.0.5-52-gd0c32ee",
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
},
{
"ImportPath": "google.golang.org/grpc/stats",
"Comment": "v1.0.5-52-gd0c32ee",
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
},
{
"ImportPath": "google.golang.org/grpc/tap",
"Comment": "v1.0.5-52-gd0c32ee",
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
},
{
"ImportPath": "google.golang.org/grpc/transport",
"Comment": "v1.0.5-52-gd0c32ee",
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "a3f3340b5840cee44f372bddb5880fcbc419b46a"
}
]
}

Gopkg.lock (generated, new file, 490 lines)

@@ -0,0 +1,490 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "bazil.org/fuse"
packages = [
".",
"fs",
"fuseutil"
]
revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
version = "v0.23.0"
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = [
"storage",
"version"
]
revision = "fbe7db0e3f9793ba3e5704efbab84f51436c136e"
version = "v18.0.0"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = [
"autorest",
"autorest/adal",
"autorest/azure",
"autorest/date"
]
revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318"
version = "v10.12.0"
[[projects]]
branch = "master"
name = "github.com/Unknwon/goconfig"
packages = ["."]
revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"
[[projects]]
name = "github.com/VividCortex/ewma"
packages = ["."]
revision = "b24eb346a94c3ba12c1da1e564dbac1b498a77ce"
version = "v1.1.1"
[[projects]]
branch = "master"
name = "github.com/a8m/tree"
packages = ["."]
revision = "3cf936ce15d6100c49d9c75f79c220ae7e579599"
[[projects]]
name = "github.com/abbot/go-http-auth"
packages = ["."]
revision = "0ddd408d5d60ea76e320503cc7dd091992dee608"
version = "v0.4.0"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/csm",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkio",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/eventstream",
"private/protocol/eventstream/eventstreamapi",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/s3",
"service/s3/s3iface",
"service/s3/s3manager",
"service/sts"
]
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
version = "v1.14.8"
[[projects]]
name = "github.com/billziss-gh/cgofuse"
packages = ["fuse"]
revision = "ea66f9809c71af94522d494d3d617545662ea59d"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/coreos/bbolt"
packages = ["."]
revision = "af9db2027c98c61ecd8e17caa5bd265792b9b9a2"
[[projects]]
name = "github.com/cpuguy83/go-md2man"
packages = ["md2man"]
revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
version = "v1.0.8"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
name = "github.com/djherbis/times"
packages = ["."]
revision = "95292e44976d1217cf3611dc7c8d9466877d3ed5"
version = "v1.0.1"
[[projects]]
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
packages = [
"dropbox",
"dropbox/async",
"dropbox/common",
"dropbox/file_properties",
"dropbox/files",
"dropbox/seen_state",
"dropbox/sharing",
"dropbox/team_common",
"dropbox/team_policies",
"dropbox/users",
"dropbox/users_common"
]
revision = "7afa861bfde5a348d765522b303b6fbd9d250155"
version = "v4.1.0"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
version = "v1.37.0"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/google/go-querystring"
packages = ["query"]
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
name = "github.com/jlaffaye/ftp"
packages = ["."]
revision = "2403248fa8cc9f7909862627aa7337f13f8e0bf1"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"
[[projects]]
branch = "master"
name = "github.com/kardianos/osext"
packages = ["."]
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
[[projects]]
name = "github.com/kr/fs"
packages = ["."]
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
version = "v0.1.0"
[[projects]]
name = "github.com/marstr/guid"
packages = ["."]
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
version = "v1.1.0"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
branch = "master"
name = "github.com/ncw/go-acd"
packages = ["."]
revision = "887eb06ab6a255fbf5744b5812788e884078620a"
[[projects]]
name = "github.com/ncw/swift"
packages = ["."]
revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
version = "v1.0.39"
[[projects]]
branch = "master"
name = "github.com/nsf/termbox-go"
packages = ["."]
revision = "5c94acc5e6eb520f1bcd183974e01171cc4c23b3"
[[projects]]
branch = "master"
name = "github.com/okzk/sdnotify"
packages = ["."]
revision = "ed8ca104421a21947710335006107540e3ecb335"
[[projects]]
name = "github.com/patrickmn/go-cache"
packages = ["."]
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
version = "v2.1.0"
[[projects]]
name = "github.com/pengsrc/go-shared"
packages = [
"buffer",
"check",
"convert",
"log",
"reopen"
]
revision = "807ee759d82c84982a89fb3dc875ef884942f1e5"
version = "v0.2.0"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "57673e38ea946592a59c26592b7e6fbda646975b"
version = "1.8.0"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/rfjakob/eme"
packages = ["."]
revision = "01668ae55fe0b79a483095689043cce3e80260db"
version = "v1.1"
[[projects]]
name = "github.com/russross/blackfriday"
packages = ["."]
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
version = "v1.5.1"
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
packages = ["."]
revision = "f9261e73885de99b1647d68bedadf2b9a99ad11f"
[[projects]]
branch = "master"
name = "github.com/skratchdot/open-golang"
packages = ["open"]
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
[[projects]]
name = "github.com/spf13/cobra"
packages = [
".",
"doc"
]
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
version = "v0.0.3"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
name = "github.com/stretchr/testify"
packages = [
"assert",
"require"
]
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[[projects]]
branch = "master"
name = "github.com/t3rm1n4l/go-mega"
packages = ["."]
revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813"
[[projects]]
branch = "master"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
revision = "ba9c9e33906f58169366275e3450db66139a31a9"
[[projects]]
name = "github.com/yunify/qingstor-sdk-go"
packages = [
".",
"config",
"logger",
"request",
"request/builder",
"request/data",
"request/errors",
"request/signer",
"request/unpacker",
"service",
"utils"
]
revision = "4f9ac88c5fec7350e960aabd0de1f1ede0ad2895"
version = "v2.2.14"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"internal/chacha20",
"internal/subtle",
"nacl/secretbox",
"pbkdf2",
"poly1305",
"salsa20/salsa",
"scrypt",
"ssh",
"ssh/agent",
"ssh/terminal"
]
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"html",
"html/atom",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"publicsuffix",
"webdav",
"webdav/internal/xml",
"websocket"
]
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt"
]
revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows"
]
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
[[projects]]
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = [
"drive/v3",
"gensupport",
"googleapi",
"googleapi/internal/uritemplates",
"storage/v1"
]
revision = "2eea9ba0a3d94f6ab46508083e299a00bbbc65f6"
[[projects]]
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"log",
"urlfetch"
]
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
version = "v1.1.0"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "c1378c5fc821e27711155958ff64b3c74b56818ba4733dbfe0c86d518c32880e"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml (new file, 11 lines)

@@ -0,0 +1,11 @@
# pin this to master to pull in the macOS changes
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
# pin this to master to pull in the fix for linux/mips
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/coreos/bbolt"

ISSUE_TEMPLATE.md

@@ -1,17 +1,43 @@
When filing an issue, please include the following information if possible as well as a description of the problem. Make sure you test with the latest beta of rclone.
<!--
http://beta.rclone.org/
http://rclone.org/downloads/
Hi!
If you've just got a question or aren't sure if you've found a bug then please use the [rclone forum](https://forum.rclone.org/) instead of filing an issue.
We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!
> What is your rclone version (eg output from `rclone -V`)
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum
> Which OS you are using and how many bits (eg Windows 7, 64 bit)
https://forum.rclone.org/
> Which cloud storage system are you using? (eg Google Drive)
instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.
> The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
If you think you might have found a bug, please can you try to replicate it with the latest beta?
> A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.
Thanks
The Rclone Developers
-->
#### What is the problem you are having with rclone?
#### What is your rclone version (eg output from `rclone -V`)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)

MAINTAINERS.md (new file, 87 lines)

@@ -0,0 +1,87 @@
# Maintainers guide for rclone #
Current active maintainers of rclone are
* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n
**This is a work-in-progress draft**
This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.
## Triaging Tickets ##
When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket so tickets may remain without labels or milestone for a while.
Rclone uses the labels like this:
* `bug` - a definite verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with the `rclone mount` command
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
* `help wanted` - mark these if you find a self contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation etc
* `Needs Go 1.XX` - waiting for that version of Go to be released
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
* `Remote: XXX` - which rclone backend this affects
* `thinking` - not decided on the course of action yet
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).
The milestones have these meanings:
* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
* Soon - stuff we think is a good idea - waiting to be scheduled to a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
## Closing Tickets ##
Close tickets as soon as you can - make sure they are tagged with a release. Post a link to a beta in the ticket with the fix in, asking for feedback.
## Pull requests ##
Try to process pull requests promptly!
Merging pull requests on GitHub itself works quite well nowadays, so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
Sometimes pull requests need to be left open for a while - this especially true of contributions of new backends which take a long time to get right.
## Merges ##
If you are merging a branch locally then do `git merge --ff-only branch-name` to avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
## Release cycle ##
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer if there is something big to merge that didn't stabilize properly or for personal reasons.
High impact regressions should be fixed before the next release.
Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.
Towards the end of the release cycle try not to merge anything too big so let things settle down.
Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming, often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
## Mailing list ##
There is now an invite only mailing list for rclone developers `rclone-dev` on google groups.
## TODO ##
I should probably make a dev@rclone.org to register with cloud providers.

MANUAL.html (file diff suppressed because it is too large)

MANUAL.md (10092 lines, file diff suppressed because it is too large)

MANUAL.txt (9969 lines, file diff suppressed because it is too large)

Makefile (160 lines changed)

@@ -1,66 +1,100 @@
SHELL = /bin/bash
TAG := $(shell echo `git describe --tags`-`git rev-parse --abbrev-ref HEAD` | sed 's/-\([0-9]\)-/-0\1-/; s/-\(HEAD\|master\)$$//')
SHELL = bash
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH :=
BRANCH_PATH :=
endif
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
GO_LATEST := $(findstring go1.8,$(GO_VERSION))
BETA_URL := http://beta.rclone.org/$(TAG)/
# Only needed for Go 1.5
export GO15VENDOREXPERIMENT=1
# Run full tests if go >= go1.9
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
endif
.PHONY: rclone
.PHONY: rclone vars version
rclone:
touch fs/version.go
go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)"
go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
cp -av `go env GOPATH`/bin/rclone .
vars:
@echo SHELL="'$(SHELL)'"
@echo BRANCH="'$(BRANCH)'"
@echo TAG="'$(TAG)'"
@echo LAST_TAG="'$(LAST_TAG)'"
@echo NEW_TAG="'$(NEW_TAG)'"
@echo GO_VERSION="'$(GO_VERSION)'"
@echo GO_LATEST="'$(GO_LATEST)'"
@echo FULL_TESTS="'$(FULL_TESTS)'"
@echo BETA_URL="'$(BETA_URL)'"
version:
@echo '$(TAG)'
# Full suite of integration tests
test: rclone
go test $(GO_FILES)
cd fs && go run test_all.go
go install github.com/ncw/rclone/fstest/test_all
-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
@echo "Written logs in test.log and fs/test_all.log"
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(GO_FILES)
RCLONE_CONFIG="/notfound" go test -cpu=2 -race $(GO_FILES)
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
ifdef FULL_TESTS
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
endif
# Do source code quality checks
check: rclone
ifdef GO_LATEST
go tool vet -printfuncs Debugf,Infof,Logf,Errorf . 2>&1 | grep -E -v vendor/ ; test $$? -eq 1
errcheck $(GO_FILES)
ifdef FULL_TESTS
go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
errcheck $(BUILDTAGS) ./...
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
go list ./... | grep -v /vendor/ | xargs -i golint {} | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
else
@echo Skipping tests as not on Go stable
@echo Skipping source quality tests as version of go too old
endif
gometalinter_install:
go get -u github.com/alecthomas/gometalinter
gometalinter --install --update
# We aren't using gometalinter as the default linter yet because
# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
# 2. can't get -printfuncs working with the vet linter
gometalinter:
gometalinter ./...
# Get the build dependencies
build_dep:
ifdef GO_LATEST
ifdef FULL_TESTS
go get -u github.com/kisielk/errcheck
go get -u golang.org/x/tools/cmd/goimports
go get -u github.com/golang/lint/golint
go get -u github.com/inconshreveable/mousetrap
go get -u github.com/tools/godep
endif
# Get the release dependencies
release_dep:
go get -u github.com/goreleaser/nfpm/...
go get -u github.com/aktau/github-release
# Update dependencies
update:
rm -rf Godeps vendor
go get -t -u -f -v ./...
godep save ./...
go get -u github.com/golang/dep/cmd/dep
dep ensure -update -v
doc: rclone.1 MANUAL.html MANUAL.txt
@@ -79,6 +113,9 @@ MANUAL.txt: MANUAL.md
commanddocs: rclone
rclone gendocs docs/content/commands/
rcdocs: rclone
bin/make_rc_docs.sh
install: rclone
install -d ${DESTDIR}/usr/bin
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
@@ -87,7 +124,7 @@ clean:
go clean ./...
find . -name \*~ | xargs -r rm -f
rm -rf build docs/public
rm -f rclone rclonetest/rclonetest
rm -f rclone fs/operations/operations.test fs/sync/sync.test fs/test_all.log test.log
website:
cd docs && hugo
@@ -95,27 +132,75 @@ website:
upload_website: website
rclone -v sync docs/public memstore:www-rclone-org
tarball:
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
sign_upload:
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
cd build && sha256sum rclone-v* | gpg --clearsign > SHA256SUMS
check_sign:
cd build && gpg --verify MD5SUMS && gpg --decrypt MD5SUMS | md5sum -c
cd build && gpg --verify SHA1SUMS && gpg --decrypt SHA1SUMS | sha1sum -c
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload:
rclone -v copy build/ memstore:downloads-rclone-org
rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org
upload_github:
./bin/upload-github $(TAG)
cross: doc
go run bin/cross-compile.go -release current $(TAG)
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
beta:
go run bin/cross-compile.go $(TAG)β
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)β
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
@echo Beta release ready at http://pub.rclone.org/$(TAG)%CE%B2/
@echo Beta release ready at https://pub.rclone.org/$(TAG)%CE%B2/
log_since_last_release:
git log $(LAST_TAG)..
compile_all:
ifdef FULL_TESTS
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)β
else
@echo Skipping compile all as version of go too old
endif
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
endif
travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(TAG)β
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' build/ memstore:beta-rclone-org
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)β
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
# Fetch the windows builds from appveyor
fetch_windows:
rclone -v copy --include 'rclone-v*-windows-*.zip' $(BETA_UPLOAD) build/
-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
md5sum build/rclone-*-windows-*.zip | sort
serve: website
cd docs && hugo server -v -w
@@ -123,8 +208,8 @@ tag: doc
@echo "Old tag is $(LAST_TAG)"
@echo "New tag is $(NEW_TAG)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
git tag $(NEW_TAG)
echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
@echo "Edit the new changelog in docs/content/changelog.md"
@echo " * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
@git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
@@ -133,9 +218,12 @@ tag: doc
@echo "And finally run make retag before make cross etc"
retag:
git tag -f $(LAST_TAG)
git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
startdev:
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
gen_tests:
cd fstest/fstests && go generate
winzip:
zip -9 rclone-$(TAG).zip rclone.exe


@@ -1,29 +1,40 @@
[![Logo](http://rclone.org/img/rclone-120x120.png)](http://rclone.org/)
[![Logo](https://rclone.org/img/rclone-120x120.png)](https://rclone.org/)
[Website](http://rclone.org) |
[Documentation](http://rclone.org/docs/) |
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](http://rclone.org/changelog/) |
[Installation](http://rclone.org/install/) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[G+](https://google.com/+RcloneOrg)
[![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone) [![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/ncw/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/ncw/rclone) [![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
[![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/ncw/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/ncw/rclone)
[![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
[![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
Rclone is a command line program to sync files and directories to and from
* Google Drive
* Amazon S3
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* Amazon Drive
* Microsoft One Drive
* Hubic
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
* Backblaze B2
* Yandex Disk
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* HTTP
* Hubic
* Mega
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* OpenDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
* pCloud
* QingStor
* SFTP
* Webdav / Owncloud / Nextcloud
* Yandex Disk
* The local filesystem
Features
@@ -41,7 +52,7 @@ Features
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
* http://rclone.org/
* https://rclone.org/
License
-------


@@ -6,29 +6,36 @@ Making a release
* git status - make sure everything is checked in
* Check travis & appveyor builds are green
* make check
* make test
* make test # see integration test server or run locally
* make tag
* edit docs/content/changelog.md
* make doc
* git status - to check for new man pages - git add them
* # Update version number in snapcraft.yml
* git commit -a -v -m "Version v1.XX"
* make retag
* make release_dep
* # Set the GOPATH for a current stable go compiler
* make cross
* make upload
* make upload_website
* git checkout docs/content/commands # to undo date changes in commands
* git push --tags origin master
* git push --tags origin master:stable # update the stable branch for packager.io
* # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
* make fetch_windows
* make tarball
* make sign_upload
* make check_sign
* make upload
* make upload_website
* make upload_github
* make startdev
* # announce with forum post, twitter post, G+ post
Early in the next release cycle update the vendored dependencies
* Review any pinned packages in Gopkg.toml and remove if possible
* make update
* git status
* git add new files
* carry forward any patches to vendor stuff
* git commit -a -v
## Make version number go to -DEV and check in
Make the version number be just in a file?


@@ -1,63 +0,0 @@
// Test AmazonCloudDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package amazonclouddrive_test
import (
"testing"
"github.com/ncw/rclone/amazonclouddrive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func TestSetup(t *testing.T) {
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
fstests.RemoteName = "TestAmazonCloudDrive:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }


@@ -1,20 +0,0 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
environment:
GOPATH: c:\gopath
install:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go install
build_script:
- rmdir vendor\bazil.org\fuse /s /q
- go test -cpu=2 ./...
- go test -cpu=2 -short -race ./...


@@ -1,301 +0,0 @@
package api
import (
"fmt"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
)
// Error describes a B2 error response
type Error struct {
Status int `json:"status"` // The numeric HTTP status code. Always matches the status in the HTTP response.
Code string `json:"code"` // A single-identifier code that identifies the error.
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
}
// Error satisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
}
// Fatal satisfies the Fatal interface
//
// It indicates which errors should be treated as fatal
func (e *Error) Fatal() bool {
return e.Status == 403 // 403 errors shouldn't be retried
}
var _ fs.Fataler = (*Error)(nil)
// Account describes a B2 account
type Account struct {
ID string `json:"accountId"` // The identifier for the account.
}
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// Timestamp is a UTC time when this file was uploaded. It is a base
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
// fits in a 64 bit integer such as the type "long" in the programming
// language Java. It is intended to be compatible with Java's time
// long. For example, it can be passed directly into the java call
// Date.setTime(long time).
type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
timestamp := (*time.Time)(t).UTC().UnixNano()
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
}
// UnmarshalJSON turns JSON into a Timestamp
func (t *Timestamp) UnmarshalJSON(data []byte) error {
timestamp, err := strconv.ParseInt(string(data), 10, 64)
if err != nil {
return err
}
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
return nil
}
const versionFormat = "-v2006-01-02-150405.000"
// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
s := (time.Time)(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
}
// RemoveVersion removes the timestamp from a filename as a version string.
//
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
newRemote = remote
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
if len(base) < len(versionFormat) {
return
}
versionStart := len(base) - len(versionFormat)
// Check it ends in -xxx
if base[len(base)-4] != '-' {
return
}
// Replace with .xxx for parsing
base = base[:len(base)-4] + "." + base[len(base)-3:]
newT, err := time.Parse(versionFormat, base[versionStart:])
if err != nil {
return
}
return Timestamp(newT), base[:versionStart] + ext
}
// IsZero returns true if the timestamp is uninitialised
func (t Timestamp) IsZero() bool {
return (time.Time)(t).IsZero()
}
// Equal compares two timestamps
//
// If either is zero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
if (time.Time)(t).IsZero() {
return false
}
if (time.Time)(s).IsZero() {
return false
}
return (time.Time)(t).Equal((time.Time)(s))
}
// File is info about a file
type File struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
Size int64 `json:"size"` // The number of bytes in the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
}
// ListBucketsResponse is as returned from the b2_list_buckets call
type ListBucketsResponse struct {
Buckets []Bucket `json:"buckets"`
}
// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
type ListFileNamesRequest struct {
BucketID string `json:"bucketId"` // required - The bucket to look for file names in.
StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this name will be returned.
MaxFileCount int `json:"maxFileCount,omitempty"` // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
StartFileID string `json:"startFileId,omitempty"` // optional - What to pass in to startFileId for the next search to continue where this one left off.
Prefix string `json:"prefix,omitempty"` // optional - Files returned will be limited to those with the given prefix. Defaults to the empty string, which matches all files.
Delimiter string `json:"delimiter,omitempty"` // Files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
}
// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
type ListFileNamesResponse struct {
Files []File `json:"files"` // An array of objects, each one describing one file.
NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
NextFileID *string `json:"nextFileId"` // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
}
// GetUploadURLRequest is passed to b2_get_upload_url
type GetUploadURLRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
}
// GetUploadURLResponse is received from b2_get_upload_url
type GetUploadURLResponse struct {
BucketID string `json:"bucketId"` // The unique ID of the bucket.
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_file.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
}
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
AccountID string `json:"accountId"` // Your account ID.
BucketID string `json:"bucketId"` // The bucket that the file is in.
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}
// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// DeleteBucketRequest is used to create a bucket
type DeleteBucketRequest struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
}
// DeleteFileRequest is used to delete a file version
type DeleteFileRequest struct {
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
Name string `json:"fileName"` // The name of this file.
}
// HideFileRequest is used to delete a file
type HideFileRequest struct {
BucketID string `json:"bucketId"` // The bucket containing the file to hide.
Name string `json:"fileName"` // The name of the file to hide.
}
// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
type GetFileInfoRequest struct {
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
}
// StartLargeFileRequest (b2_start_large_file) Prepares for uploading the parts of a large file.
//
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
// Java call Date.setTime(long time).
//
// If the caller knows the SHA1 of the entire large file being
// uploaded, Backblaze recommends using large_file_sha1 as the name,
// and a 40 byte hex string representing the SHA1.
//
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
type StartLargeFileRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in.
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
}
// StartLargeFileResponse is the response to StartLargeFileRequest
type StartLargeFileResponse struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
}
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
type GetUploadPartURLRequest struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}
// GetUploadPartURLResponse is received from b2_get_upload_url
type GetUploadPartURLResponse struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_part.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_part.
}
// UploadPartResponse is the response to b2_upload_part
type UploadPartResponse struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
}
// FinishLargeFileRequest is passed to b2_finish_large_file
//
// The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore).
//
// Large files do not have a SHA1 checksum. The value will always be "none".
type FinishLargeFileRequest struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of the part 1 is the first string in the array, at index 0.
}
// CancelLargeFileRequest is passed to b2_finish_large_file
//
// The response is a CancelLargeFileResponse
type CancelLargeFileRequest struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}
// CancelLargeFileResponse is the response to CancelLargeFileRequest
type CancelLargeFileResponse struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
Name string `json:"fileName"` // The name of this file.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
}


@@ -1,87 +0,0 @@
package api_test
import (
"testing"
"time"
"github.com/ncw/rclone/b2/api"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
emptyT api.Timestamp
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)
func TestTimestampMarshalJSON(t *testing.T) {
resB, err := t0.MarshalJSON()
res := string(resB)
require.NoError(t, err)
assert.Equal(t, "3661123", res)
resB, err = t1.MarshalJSON()
res = string(resB)
require.NoError(t, err)
assert.Equal(t, "981173106123", res)
}
func TestTimestampUnmarshalJSON(t *testing.T) {
var tActual api.Timestamp
err := tActual.UnmarshalJSON([]byte("981173106123"))
require.NoError(t, err)
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}
func TestTimestampAddVersion(t *testing.T) {
for _, test := range []struct {
t api.Timestamp
in string
expected string
}{
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
{t1, "potato", "potato-v2001-02-03-040506-123"},
{t1, "", "-v2001-02-03-040506-123"},
} {
actual := test.t.AddVersion(test.in)
assert.Equal(t, test.expected, actual, test.in)
}
}
func TestTimestampRemoveVersion(t *testing.T) {
for _, test := range []struct {
in string
expectedT api.Timestamp
expectedRemote string
}{
{"potato.txt", emptyT, "potato.txt"},
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
{"potato-v2001-02-03-040506-123", t1, "potato"},
{"-v2001-02-03-040506-123", t1, ""},
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
} {
actualT, actualRemote := api.RemoveVersion(test.in)
assert.Equal(t, test.expectedT, actualT, test.in)
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
}
}
func TestTimestampIsZero(t *testing.T) {
assert.True(t, emptyT.IsZero())
assert.False(t, t0.IsZero())
assert.False(t, t1.IsZero())
}
func TestTimestampEqual(t *testing.T) {
assert.False(t, emptyT.Equal(emptyT))
assert.False(t, t0.Equal(emptyT))
assert.False(t, emptyT.Equal(t0))
assert.False(t, t0.Equal(t1))
assert.False(t, t1.Equal(t0))
assert.True(t, t0.Equal(t0))
assert.True(t, t1.Equal(t1))
}


@@ -1,63 +0,0 @@
// Test B2 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package b2_test
import (
"testing"
"github.com/ncw/rclone/b2"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func TestSetup(t *testing.T) {
fstests.NilObject = fs.Object((*b2.Object)(nil))
fstests.RemoteName = "TestB2:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }


@@ -1,302 +0,0 @@
// Upload large files for b2
//
// Docs - https://www.backblaze.com/b2/docs/large_files.html
package b2
import (
"bytes"
"crypto/sha1"
"fmt"
"io"
"sync"
"github.com/ncw/rclone/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/rest"
"github.com/pkg/errors"
)
// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
f *Fs // parent Fs
o *Object // object being uploaded
in io.Reader // read the data from here
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
}
// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
if parts > maxParts {
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
modTime := src.ModTime()
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
}
bucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: o.fs.root + remote,
ContentType: fs.MimeType(src),
Info: map[string]string{
timeKey: timeString(modTime),
},
}
// Set the SHA1 if known
if calculatedSha1, err := src.Hash(fs.HashSHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
up = &largeUpload{
f: f,
o: o,
in: in,
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, parts),
}
return up, nil
}
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
}
return upload, nil
}
// returnUploadURL returns the UploadURL to the cache
func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
if upload == nil {
return
}
up.uploadMu.Lock()
up.uploads = append(up.uploads, upload)
up.uploadMu.Unlock()
}
// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
up.uploadMu.Lock()
up.uploads = nil
up.uploadMu.Unlock()
}
// Transfer a chunk
func (up *largeUpload) transferChunk(part int64, body []byte) error {
calculatedSHA1 := fmt.Sprintf("%x", sha1.Sum(body))
up.sha1s[part-1] = calculatedSHA1
size := int64(len(body))
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
upload, err := up.getUploadURL()
if err != nil {
return false, err
}
// Authorization
//
// An upload authorization token, from b2_get_upload_part_url.
//
// X-Bz-Part-Number
//
// A number from 1 to 10000. The parts uploaded for one file
// must have contiguous numbers, starting with 1.
//
// Content-Length
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
// the last one is 100MB.
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
Method: "POST",
Absolute: true,
Path: upload.UploadURL,
Body: fs.AccountPart(up.o, bytes.NewBuffer(body)),
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
sha1Header: calculatedSHA1,
},
ContentLength: &size,
}
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(&opts, nil, &response)
retry, err := up.f.shouldRetry(resp, err)
// On retryable error clear PartUploadURL
if retry {
fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err)
upload = nil
}
up.returnUploadURL(upload)
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else {
fs.Debugf(up.o, "Done sending chunk %d", part)
}
return err
}
// finish closes off the large upload
func (up *largeUpload) finish() error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_finish_large_file",
}
var request = api.FinishLargeFileRequest{
ID: up.id,
SHA1s: up.sha1s,
}
var response api.FileInfo
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return err
}
return up.o.decodeMetaDataFileInfo(&response)
}
// cancel aborts the large upload
func (up *largeUpload) cancel() error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_cancel_large_file",
}
var request = api.CancelLargeFileRequest{
ID: up.id,
}
var response api.CancelLargeFileResponse
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
return err
}
// Upload uploads the chunks from the input
func (up *largeUpload) Upload() error {
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size
errs := make(chan error, 1)
var wg sync.WaitGroup
var err error
fs.AccountByPart(up.o) // Cancel whole file accounting before reading
outer:
for part := int64(1); part <= up.parts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
reqSize := remaining
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
// Get a block of memory
buf := up.f.getUploadBlock()[:reqSize]
// Read the chunk
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putUploadBlock(buf)
break outer
}
// Transfer the chunk
wg.Add(1)
go func(part int64, buf []byte) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(part, buf)
if err != nil {
select {
case errs <- err:
default:
}
}
}(part, buf)
remaining -= reqSize
}
wg.Wait()
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel()
if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
}
// Check any errors
fs.Debugf(up.o, "Finishing large file upload")
return up.finish()
}

45 backend/alias/alias.go Normal file

@@ -0,0 +1,45 @@
package alias
import (
"errors"
"path"
"path/filepath"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "alias",
Description: "Alias for an existing remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
}},
}
fs.Register(fsi)
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string) (fs.Fs, error) {
remote := config.FileGet(name, "remote")
if remote == "" {
return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
}
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
return nil, err
}
root = filepath.ToSlash(root)
return fsInfo.NewFs(configName, path.Join(fsPath, root))
}


@@ -0,0 +1,104 @@
package alias
import (
"fmt"
"path"
"path/filepath"
"sort"
"testing"
_ "github.com/ncw/rclone/backend/local" // pull in test backend
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/stretchr/testify/require"
)
var (
remoteName = "TestAlias"
)
func prepare(t *testing.T, root string) {
config.LoadConfig()
// Configure the remote
config.FileSet(remoteName, "type", "alias")
config.FileSet(remoteName, "remote", root)
}
func TestNewFS(t *testing.T) {
type testEntry struct {
remote string
size int64
isDir bool
}
for testi, test := range []struct {
remoteRoot string
fsRoot string
fsList string
wantOK bool
entries []testEntry
}{
{"", "", "", true, []testEntry{
{"four", -1, true},
{"one%.txt", 6, false},
{"three", -1, true},
{"two.html", 7, false},
}},
{"", "four", "", true, []testEntry{
{"five", -1, true},
{"under four.txt", 9, false},
}},
{"", "", "four", true, []testEntry{
{"four/five", -1, true},
{"four/under four.txt", 9, false},
}},
{"four", "..", "", true, []testEntry{
{"four", -1, true},
{"one%.txt", 6, false},
{"three", -1, true},
{"two.html", 7, false},
}},
{"four", "../three", "", true, []testEntry{
{"underthree.txt", 9, false},
}},
} {
what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)
remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
require.NoError(t, err, what)
prepare(t, remoteRoot)
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
require.NoError(t, err, what)
gotEntries, err := f.List(test.fsList)
require.NoError(t, err, what)
sort.Sort(gotEntries)
require.Equal(t, len(test.entries), len(gotEntries), what)
for i, gotEntry := range gotEntries {
what := fmt.Sprintf("%s, entry=%d", what, i)
wantEntry := test.entries[i]
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
_, isDir := gotEntry.(fs.Directory)
require.Equal(t, wantEntry.isDir, isDir, what)
}
}
}
func TestNewFSNoRemote(t *testing.T) {
prepare(t, "")
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
require.Error(t, err)
require.Nil(t, f)
}
func TestNewFSInvalidRemote(t *testing.T) {
prepare(t, "not_existing_test_remote:")
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
require.Error(t, err)
require.Nil(t, f)
}


@@ -0,0 +1 @@
apple


@@ -0,0 +1 @@
beetroot


@@ -0,0 +1 @@
hello


@@ -0,0 +1 @@
rutabaga


@@ -0,0 +1 @@
potato

29 backend/all/all.go Normal file

@@ -0,0 +1,29 @@
package all
import (
// Active file systems
_ "github.com/ncw/rclone/backend/alias"
_ "github.com/ncw/rclone/backend/amazonclouddrive"
_ "github.com/ncw/rclone/backend/azureblob"
_ "github.com/ncw/rclone/backend/b2"
_ "github.com/ncw/rclone/backend/box"
_ "github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive"
_ "github.com/ncw/rclone/backend/dropbox"
_ "github.com/ncw/rclone/backend/ftp"
_ "github.com/ncw/rclone/backend/googlecloudstorage"
_ "github.com/ncw/rclone/backend/http"
_ "github.com/ncw/rclone/backend/hubic"
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive"
_ "github.com/ncw/rclone/backend/opendrive"
_ "github.com/ncw/rclone/backend/pcloud"
_ "github.com/ncw/rclone/backend/qingstor"
_ "github.com/ncw/rclone/backend/s3"
_ "github.com/ncw/rclone/backend/sftp"
_ "github.com/ncw/rclone/backend/swift"
_ "github.com/ncw/rclone/backend/webdav"
_ "github.com/ncw/rclone/backend/yandex"
)


@@ -3,7 +3,6 @@
package amazonclouddrive
/*
FIXME make searching for directory in id and file in id more efficient
- use the name: search parameter - remember the escaping rules
- use Folder GetNode and GetFile
@@ -19,37 +18,38 @@ import (
"log"
"net/http"
"path"
"regexp"
"strings"
"time"
"github.com/ncw/go-acd"
"github.com/ncw/rclone/dircache"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/pacer"
"github.com/ncw/rclone/rest"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "amzn1.application-oa2-client.6bf18d2d1f5b485c94c8988bb03ad0e7"
rcloneEncryptedClientSecret = "ZP12wYlGw198FtmqfOxyNAGXU3fwVcQdmt--ba1d00wJnUs0LOzvVyXVDbqhbcUqnr5Vd1QejwWmiv1Ep7UJG1kUQeuBP5n9goXWd5MrAf0"
folderKind = "FOLDER"
fileKind = "FILE"
assetKind = "ASSET"
statusAvailable = "AVAILABLE"
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
minSleep = 20 * time.Millisecond
warnFileSize = 50000 << 20 // Display warning for files larger than this size
folderKind = "FOLDER"
fileKind = "FILE"
statusAvailable = "AVAILABLE"
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
minSleep = 20 * time.Millisecond
warnFileSize = 50000 << 20 // Display warning for files larger than this size
)
// Globals
var (
// Flags
tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
uploadWaitPerGB = fs.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
uploadWaitPerGB = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
// Description of how to auth for this app
acdConfig = &oauth2.Config{
Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
@@ -57,8 +57,8 @@ var (
AuthURL: "https://www.amazon.com/ap/oa",
TokenURL: "https://api.amazon.com/auth/o2/token",
},
ClientID: rcloneClientID,
ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
ClientID: "",
ClientSecret: "",
RedirectURL: oauthutil.RedirectURL,
}
)
@@ -76,14 +76,20 @@ func init() {
}
},
Options: []fs.Option{{
Name: fs.ConfigClientID,
Help: "Amazon Application Client Id - leave blank normally.",
Name: config.ConfigClientID,
Help: "Amazon Application Client Id - required.",
}, {
Name: fs.ConfigClientSecret,
Help: "Amazon Application Client Secret - leave blank normally.",
Name: config.ConfigClientSecret,
Help: "Amazon Application Client Secret - required.",
}, {
Name: config.ConfigAuthURL,
Help: "Auth server URL - leave blank to use Amazon's.",
}, {
Name: config.ConfigTokenURL,
Help: "Token server url - leave blank to use Amazon's.",
}},
})
fs.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
}
// Fs represents a remote acd server
@@ -130,9 +136,6 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// Pattern to match an acd path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses an acd 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
@@ -168,13 +171,37 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
return true, err
}
}
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// If query parameters contain X-Amz-Algorithm remove Authorization header
//
// This happens when ACD redirects to S3 for the download. The oauth
// transport puts an Authorization header in which we need to remove
// otherwise we get this message from AWS
//
// Only one auth mechanism allowed; only the X-Amz-Algorithm query
// parameter, Signature query string parameter or the Authorization
// header should be specified
func filterRequest(req *http.Request) {
if req.URL.Query().Get("X-Amz-Algorithm") != "" {
fs.Debugf(nil, "Removing Authorization: header after redirect to S3")
req.Header.Del("Authorization")
}
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, acdConfig)
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(filterRequest)
} else {
fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, acdConfig, baseClient)
if err != nil {
log.Fatalf("Failed to configure Amazon Drive: %v", err)
}
@@ -185,9 +212,19 @@ func NewFs(name, root string) (fs.Fs, error) {
root: root,
c: c,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
noAuthClient: fs.Config.Client(),
noAuthClient: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{CaseInsensitive: true, ReadMimeType: true}).Fill(f)
f.features = (&fs.Features{
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.getRootInfo()
return err
})
// Update endpoints
var resp *http.Response
@@ -206,12 +243,6 @@ func NewFs(name, root string) (fs.Fs, error) {
}
f.trueRootID = *rootInfo.Id
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.getRootInfo()
return err
})
f.dirCache = dircache.New(root, f.trueRootID, f)
// Find the current root
@@ -395,45 +426,54 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
return
}
// ListDir reads the directory specified by the job into out, returning any more jobs
func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
fs.Debugf(f, "Reading %q", job.Path)
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(dir, false)
if err != nil {
return nil, err
}
maxTries := fs.Config.LowLevelRetries
var iErr error
for tries := 1; tries <= maxTries; tries++ {
_, err = f.listAll(job.DirID, "", false, false, func(node *acd.Node) bool {
remote := job.Path + *node.Name
entries = nil
_, err = f.listAll(directoryID, "", false, false, func(node *acd.Node) bool {
remote := path.Join(dir, *node.Name)
switch *node.Kind {
case folderKind:
if out.IncludeDirectory(remote) {
dir := &fs.Dir{
Name: remote,
Bytes: -1,
Count: -1,
}
dir.When, _ = time.Parse(timeFormat, *node.ModifiedDate) // FIXME
if out.AddDir(dir) {
return true
}
if job.Depth > 0 {
jobs = append(jobs, dircache.ListDirJob{DirID: *node.Id, Path: remote + "/", Depth: job.Depth - 1})
}
}
// cache the directory ID for later lookups
f.dirCache.Put(remote, *node.Id)
when, _ := time.Parse(timeFormat, *node.ModifiedDate) // FIXME
d := fs.NewDir(remote, when).SetID(*node.Id)
entries = append(entries, d)
case fileKind:
o, err := f.newObjectWithInfo(remote, node)
if err != nil {
out.SetError(err)
return true
}
if out.Add(o) {
iErr = err
return true
}
entries = append(entries, o)
default:
// ignore ASSET etc
}
return false
})
if fs.IsRetryError(err) {
fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", job.Path, err, tries, maxTries)
if iErr != nil {
return nil, iErr
}
if fserrors.IsRetryError(err) {
fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", dir, err, tries, maxTries)
continue
}
if err != nil {
@@ -441,13 +481,7 @@ func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.
}
break
}
fs.Debugf(f, "Finished reading %q", job.Path)
return jobs, err
}
// List walks the path returning files and directories into out
func (f *Fs) List(out fs.ListOpts, dir string) {
f.dirCache.List(f, out, dir)
return entries, nil
}
// checkUpload checks to see if an error occurred after the file was
@@ -534,7 +568,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
// Temporary Object under construction
@@ -546,7 +580,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
err := o.readMetaData()
switch err {
case nil:
return o, o.Update(in, src)
return o, o.Update(in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
default:
@@ -841,8 +875,8 @@ func (f *Fs) Precision() time.Duration {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Copy src to this remote using server side copy operations.
@@ -898,9 +932,9 @@ func (o *Object) Remote() string {
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
if o.info.ContentProperties != nil && o.info.ContentProperties.Md5 != nil {
return *o.info.ContentProperties.Md5, nil
@@ -983,7 +1017,7 @@ func (o *Object) Storable() bool {
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
bigObject := o.Size() >= int64(tempLinkThreshold)
if bigObject {
fs.Debugf(o, "Dowloading large object via tempLink")
fs.Debugf(o, "Downloading large object via tempLink")
}
file := acd.File{Node: o.info}
var resp *http.Response
@@ -1002,7 +1036,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
file := acd.File{Node: o.info}
var info *acd.File
var resp *http.Response
@@ -1167,6 +1201,128 @@ func (o *Object) MimeType() string {
return ""
}
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behaviour of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
checkpoint := config.FileGet(f.name, "checkpoint")
quit := make(chan bool)
go func() {
for {
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
fs.Debugf(f, "Unable to save checkpoint: %v", err)
}
select {
case <-quit:
return
case <-time.After(pollInterval):
}
}
}()
return quit
}
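// Usage sketch (illustrative, not part of this change): a caller polls
// once a minute and stops the poller by closing the returned channel.
//
//	quit := f.ChangeNotify(func(path string, entryType fs.EntryType) {
//		fs.Debugf(nil, "change at %q (%v)", path, entryType)
//	}, time.Minute)
//	defer close(quit)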
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
var err error
var resp *http.Response
var reachedEnd bool
var csCount int
var nodeCount int
fs.Debugf(f, "Checking for changes on remote (Checkpoint %q)", checkpoint)
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err = f.c.Changes.GetChangesFunc(&acd.ChangesOptions{
Checkpoint: checkpoint,
IncludePurged: true,
}, func(changeSet *acd.ChangeSet, err error) error {
if err != nil {
return err
}
type entryType struct {
path string
entryType fs.EntryType
}
var pathsToClear []entryType
csCount++
nodeCount += len(changeSet.Nodes)
if changeSet.End {
reachedEnd = true
}
if changeSet.Checkpoint != "" {
checkpoint = changeSet.Checkpoint
}
for _, node := range changeSet.Nodes {
if path, ok := f.dirCache.GetInv(*node.Id); ok {
if node.IsFile() {
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
} else {
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
}
continue
}
if node.IsFile() {
// translate the parent dir of this object
if len(node.Parents) > 0 {
if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
// and append the drive file name to compute the full file name
if len(path) > 0 {
path = path + "/" + *node.Name
} else {
path = *node.Name
}
// this will now clear the actual file too
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
}
} else { // a true root object that is changed
pathsToClear = append(pathsToClear, entryType{path: *node.Name, entryType: fs.EntryObject})
}
}
}
visitedPaths := make(map[string]bool)
for _, entry := range pathsToClear {
if _, ok := visitedPaths[entry.path]; ok {
continue
}
visitedPaths[entry.path] = true
notifyFunc(entry.path, entry.entryType)
}
return nil
})
return false, err
})
fs.Debugf(f, "Got %d ChangeSets with %d Nodes", csCount, nodeCount)
if err != nil && err != io.ErrUnexpectedEOF {
fs.Debugf(f, "Failed to get Changes: %v", err)
return checkpoint
}
if reachedEnd {
reachedEnd = false
fs.Debugf(f, "All changes were processed. Waiting for more.")
} else if checkpoint == "" {
fs.Debugf(f, "Did not get any checkpoint, something went wrong! %+v", resp)
}
return checkpoint
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
if o.info.Id == nil {
return ""
}
return *o.info.Id
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1175,6 +1331,8 @@ var (
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)


@@ -0,0 +1,20 @@
// Test AmazonCloudDrive filesystem interface
// +build acd
package amazonclouddrive_test
import (
"testing"
"github.com/ncw/rclone/backend/amazonclouddrive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
fstests.RemoteName = "TestAmazonCloudDrive:"
fstests.Run(t)
}

File diff suppressed because it is too large


@@ -0,0 +1,17 @@
// Test AzureBlob filesystem interface
package azureblob_test
import (
"testing"
"github.com/ncw/rclone/backend/azureblob"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*azureblob.Object)(nil),
})
}

backend/b2/api/types.go Normal file

@@ -0,0 +1,301 @@
package api
import (
"fmt"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs/fserrors"
)
// Error describes a B2 error response
type Error struct {
Status int `json:"status"` // The numeric HTTP status code. Always matches the status in the HTTP response.
Code string `json:"code"` // A single-identifier code that identifies the error.
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
}
// Error satisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
}
// Fatal satisfies the Fataler interface
//
// It indicates which errors should be treated as fatal
func (e *Error) Fatal() bool {
return e.Status == 403 // 403 errors shouldn't be retried
}
var _ fserrors.Fataler = (*Error)(nil)
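// Illustrative check (not part of this file): rclone's retry machinery
// looks for the Fataler interface on errors, so a 403 wrapped in *Error
// can stop retries early, e.g. via fserrors.IsFatalError:
//
//	if fserrors.IsFatalError(err) {
//		return err // give up rather than retry
//	}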
// Account describes a B2 account
type Account struct {
ID string `json:"accountId"` // The identifier for the account.
}
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// Timestamp is a UTC time when this file was uploaded. It is a base
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
// fits in a 64 bit integer such as the type "long" in the programming
// language Java. It is intended to be compatible with Java's time
// long. For example, it can be passed directly into the java call
// Date.setTime(long time).
type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
timestamp := (*time.Time)(t).UTC().UnixNano()
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
}
// UnmarshalJSON turns JSON into a Timestamp
func (t *Timestamp) UnmarshalJSON(data []byte) error {
timestamp, err := strconv.ParseInt(string(data), 10, 64)
if err != nil {
return err
}
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
return nil
}
const versionFormat = "-v2006-01-02-150405.000"
// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
s := (time.Time)(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
}
// RemoveVersion removes the timestamp from a filename as a version string.
//
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
newRemote = remote
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
if len(base) < len(versionFormat) {
return
}
versionStart := len(base) - len(versionFormat)
// Check it ends in -xxx
if base[len(base)-4] != '-' {
return
}
// Replace with .xxx for parsing
base = base[:len(base)-4] + "." + base[len(base)-3:]
newT, err := time.Parse(versionFormat, base[versionStart:])
if err != nil {
return
}
return Timestamp(newT), base[:versionStart] + ext
}
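// Illustrative round trip (values match the tests further down):
//
//	versioned := t0.AddVersion("potato.txt") // "potato-v1970-01-01-010101-123.txt"
//	ts, name := RemoveVersion(versioned)     // ts == t0 truncated to ms, name == "potato.txt"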
// IsZero returns true if the timestamp is uninitialised
func (t Timestamp) IsZero() bool {
return (time.Time)(t).IsZero()
}
// Equal compares two timestamps
//
// If either is zero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
if (time.Time)(t).IsZero() {
return false
}
if (time.Time)(s).IsZero() {
return false
}
return (time.Time)(t).Equal((time.Time)(s))
}
// File is info about a file
type File struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
Size int64 `json:"size"` // The number of bytes in the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
}
// ListBucketsResponse is as returned from the b2_list_buckets call
type ListBucketsResponse struct {
Buckets []Bucket `json:"buckets"`
}
// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
type ListFileNamesRequest struct {
BucketID string `json:"bucketId"` // required - The bucket to look for file names in.
StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this name will be returned.
MaxFileCount int `json:"maxFileCount,omitempty"` // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
StartFileID string `json:"startFileId,omitempty"` // optional - What to pass in to startFileId for the next search to continue where this one left off.
Prefix string `json:"prefix,omitempty"` // optional - Files returned will be limited to those with the given prefix. Defaults to the empty string, which matches all files.
Delimiter string `json:"delimiter,omitempty"` // Files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
}
// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
type ListFileNamesResponse struct {
Files []File `json:"files"` // An array of objects, each one describing one file.
NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
NextFileID *string `json:"nextFileId"` // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
}
// GetUploadURLRequest is passed to b2_get_upload_url
type GetUploadURLRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
}
// GetUploadURLResponse is received from b2_get_upload_url
type GetUploadURLResponse struct {
BucketID string `json:"bucketId"` // The unique ID of the bucket.
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_file.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
}
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
AccountID string `json:"accountId"` // Your account ID.
BucketID string `json:"bucketId"` // The bucket that the file is in.
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}
// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// DeleteBucketRequest is used to delete a bucket
type DeleteBucketRequest struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
}
// DeleteFileRequest is used to delete a file version
type DeleteFileRequest struct {
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
Name string `json:"fileName"` // The name of this file.
}
// HideFileRequest is used to delete a file
type HideFileRequest struct {
BucketID string `json:"bucketId"` // The bucket containing the file to hide.
Name string `json:"fileName"` // The name of the file to hide.
}
// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
type GetFileInfoRequest struct {
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
}
// StartLargeFileRequest (b2_start_large_file) Prepares for uploading the parts of a large file.
//
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
// Java call Date.setTime(long time).
//
// If the caller knows the SHA1 of the entire large file being
// uploaded, Backblaze recommends using large_file_sha1 as the name,
// and a 40 byte hex string representing the SHA1.
//
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
type StartLargeFileRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in.
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
}
// StartLargeFileResponse is the response to StartLargeFileRequest
type StartLargeFileResponse struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
}
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
type GetUploadPartURLRequest struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}
// GetUploadPartURLResponse is received from b2_get_upload_url
type GetUploadPartURLResponse struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_part.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_part.
}
// UploadPartResponse is the response to b2_upload_part
type UploadPartResponse struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
}
// FinishLargeFileRequest is passed to b2_finish_large_file
//
// The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore).
//
// Large files do not have a SHA1 checksum. The value will always be "none".
type FinishLargeFileRequest struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of the part 1 is the first string in the array, at index 0.
}
// CancelLargeFileRequest is passed to b2_cancel_large_file
//
// The response is a CancelLargeFileResponse
type CancelLargeFileRequest struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}
// CancelLargeFileResponse is the response to CancelLargeFileRequest
type CancelLargeFileResponse struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
Name string `json:"fileName"` // The name of this file.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
}


@@ -0,0 +1,87 @@
package api_test
import (
"testing"
"time"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
emptyT api.Timestamp
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)
func TestTimestampMarshalJSON(t *testing.T) {
resB, err := t0.MarshalJSON()
res := string(resB)
require.NoError(t, err)
assert.Equal(t, "3661123", res)
resB, err = t1.MarshalJSON()
res = string(resB)
require.NoError(t, err)
assert.Equal(t, "981173106123", res)
}
func TestTimestampUnmarshalJSON(t *testing.T) {
var tActual api.Timestamp
err := tActual.UnmarshalJSON([]byte("981173106123"))
require.NoError(t, err)
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}
func TestTimestampAddVersion(t *testing.T) {
for _, test := range []struct {
t api.Timestamp
in string
expected string
}{
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
{t1, "potato", "potato-v2001-02-03-040506-123"},
{t1, "", "-v2001-02-03-040506-123"},
} {
actual := test.t.AddVersion(test.in)
assert.Equal(t, test.expected, actual, test.in)
}
}
func TestTimestampRemoveVersion(t *testing.T) {
for _, test := range []struct {
in string
expectedT api.Timestamp
expectedRemote string
}{
{"potato.txt", emptyT, "potato.txt"},
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
{"potato-v2001-02-03-040506-123", t1, "potato"},
{"-v2001-02-03-040506-123", t1, ""},
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
} {
actualT, actualRemote := api.RemoveVersion(test.in)
assert.Equal(t, test.expectedT, actualT, test.in)
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
}
}
func TestTimestampIsZero(t *testing.T) {
assert.True(t, emptyT.IsZero())
assert.False(t, t0.IsZero())
assert.False(t, t1.IsZero())
}
func TestTimestampEqual(t *testing.T) {
assert.False(t, emptyT.Equal(emptyT))
assert.False(t, t0.Equal(emptyT))
assert.False(t, emptyT.Equal(t0))
assert.False(t, t0.Equal(t1))
assert.False(t, t1.Equal(t0))
assert.True(t, t0.Equal(t0))
assert.True(t, t1.Equal(t1))
}


@@ -5,14 +5,13 @@ package b2
// checking SHA1s?
import (
"bufio"
"bytes"
"crypto/sha1"
"fmt"
"hash"
gohash "hash"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"regexp"
"strconv"
@@ -20,10 +19,17 @@ import (
"sync"
"time"
"github.com/ncw/rclone/b2/api"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/pacer"
"github.com/ncw/rclone/rest"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
)
@@ -46,11 +52,12 @@ const (
// Globals
var (
minChunkSize = fs.SizeSuffix(100E6)
minChunkSize = fs.SizeSuffix(5E6)
chunkSize = fs.SizeSuffix(96 * 1024 * 1024)
uploadCutoff = fs.SizeSuffix(200E6)
b2TestMode = fs.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
b2Versions = fs.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
b2TestMode = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
b2Versions = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
b2HardDelete = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
)
@@ -72,8 +79,8 @@ func init() {
},
},
})
fs.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
fs.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
}
// Fs represents a remote b2 server
@@ -86,6 +93,8 @@ type Fs struct {
endpoint string // name of the starting api endpoint
srv *rest.Client // the connection to the b2 server
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
bucketIDMutex sync.Mutex // mutex to protect _bucketID
_bucketID string // the ID of the bucket we are working on
info api.AuthorizeAccountResponse // result of authorize call
@@ -184,7 +193,7 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
}
return true, err
}
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// shouldRetry returns a boolean as to whether this resp and err
@@ -225,7 +234,7 @@ func errorHandler(resp *http.Response) error {
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
if uploadCutoff < chunkSize {
return nil, errors.Errorf("b2: upload cutoff must be less than chunk size %v - was %v", chunkSize, uploadCutoff)
return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", uploadCutoff, chunkSize)
}
if chunkSize < minChunkSize {
return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize)
@@ -234,15 +243,15 @@ func NewFs(name, root string) (fs.Fs, error) {
if err != nil {
return nil, err
}
account := fs.ConfigFileGet(name, "account")
account := config.FileGet(name, "account")
if account == "" {
return nil, errors.New("account not found")
}
key := fs.ConfigFileGet(name, "key")
key := config.FileGet(name, "key")
if key == "" {
return nil, errors.New("key not found")
}
endpoint := fs.ConfigFileGet(name, "endpoint", defaultEndpoint)
endpoint := config.FileGet(name, "endpoint", defaultEndpoint)
f := &Fs{
name: name,
bucket: bucket,
@@ -250,11 +259,15 @@ func NewFs(name, root string) (fs.Fs, error) {
account: account,
key: key,
endpoint: endpoint,
srv: rest.NewClient(fs.Config.Client()).SetErrorHandler(errorHandler),
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
bufferTokens: make(chan []byte, fs.Config.Transfers),
}
f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
// Set the test flag if required
if *b2TestMode != "" {
testMode := strings.TrimSpace(*b2TestMode)
@@ -301,9 +314,9 @@ func (f *Fs) authorizeAccount() error {
f.authMu.Lock()
defer f.authMu.Unlock()
opts := rest.Opts{
Absolute: true,
Method: "GET",
Path: f.endpoint + "/b2api/v1/b2_authorize_account",
Path: "/b2api/v1/b2_authorize_account",
RootURL: f.endpoint,
UserName: f.account,
Password: f.key,
ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
@@ -436,18 +449,14 @@ var errEndList = errors.New("end list")
// than 1000)
//
// If hidden is set then it will list the hidden (deleted) files too.
func (f *Fs) list(dir string, level int, prefix string, limit int, hidden bool, fn listFn) error {
func (f *Fs) list(dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error {
root := f.root
if dir != "" {
root += dir + "/"
}
delimiter := ""
switch level {
case 1:
if !recurse {
delimiter = "/"
case fs.MaxLevel:
default:
return fs.ErrorLevelNotSupported
}
bucketID, err := f.getBucketID()
if err != nil {
@@ -495,7 +504,7 @@ func (f *Fs) list(dir string, level int, prefix string, limit int, hidden bool,
}
remote := file.Name[len(f.root):]
// Check for directory
isDirectory := level != 0 && strings.HasSuffix(remote, "/")
isDirectory := strings.HasSuffix(remote, "/")
if isDirectory {
remote = remote[:len(remote)-1]
}
@@ -520,77 +529,125 @@ func (f *Fs) list(dir string, level int, prefix string, limit int, hidden bool,
return nil
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{})
return d, nil
}
if remote == *last {
remote = object.UploadTimestamp.AddVersion(remote)
} else {
*last = remote
}
// hide objects represent deleted files which we don't list
if object.Action == "hide" {
return nil, nil
}
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketOKMu.Unlock()
}
}
// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
last := ""
err = f.list(dir, false, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return entries, nil
}
// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
err = f.listBucketsToFn(func(bucket *api.Bucket) error {
d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d)
return nil
})
if err != nil {
return nil, err
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(dir)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
last := ""
err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
if err != nil {
return err
}
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
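// Usage sketch (illustrative): the callback receives entries in tranches,
// e.g.
//
//	err := f.ListR("", func(entries fs.DirEntries) error {
//		for _, entry := range entries {
//			fs.Debugf(nil, "got %v", entry)
//		}
//		return nil
//	})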
// listBucketFn is called from listBucketsToFn to handle a bucket
@@ -660,19 +717,25 @@ func (f *Fs) clearBucketID() {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: src.Remote(),
}
return fs, fs.Update(in, src)
return fs, fs.Update(in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
opts := rest.Opts{
@@ -697,6 +760,7 @@ func (f *Fs) Mkdir(dir string) error {
_, getBucketErr := f.getBucketID()
if getBucketErr == nil {
// found so it is our bucket
f.bucketOK = true
return nil
}
if getBucketErr != fs.ErrorDirNotFound {
@@ -707,6 +771,7 @@ func (f *Fs) Mkdir(dir string) error {
return errors.Wrap(err, "failed to create bucket")
}
f.setBucketID(response.ID)
f.bucketOK = true
return nil
}
@@ -714,6 +779,8 @@ func (f *Fs) Mkdir(dir string) error {
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
@@ -737,6 +804,7 @@ func (f *Fs) Rmdir(dir string) error {
if err != nil {
return errors.Wrap(err, "failed to delete bucket")
}
f.bucketOK = false
f.clearBucketID()
f.clearUploadURL()
return nil
@@ -747,6 +815,31 @@ func (f *Fs) Precision() time.Duration {
return time.Millisecond
}
// hide hides a file on the remote
func (f *Fs) hide(Name string) error {
bucketID, err := f.getBucketID()
if err != nil {
return err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_hide_file",
}
var request = api.HideFileRequest{
BucketID: bucketID,
Name: Name,
}
var response api.File
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrapf(err, "failed to hide %q", Name)
}
return nil
}
// deleteByID deletes a file version given Name and ID
func (f *Fs) deleteByID(ID, Name string) error {
opts := rest.Opts{
@@ -795,16 +888,16 @@ func (f *Fs) purge(oldOnly bool) error {
go func() {
defer wg.Done()
for object := range toBeDeleted {
fs.Stats.Checking(object.Name)
accounting.Stats.Checking(object.Name)
checkErr(f.deleteByID(object.ID, object.Name))
fs.Stats.DoneChecking(object.Name)
accounting.Stats.DoneChecking(object.Name)
}
}()
}
last := ""
checkErr(f.list("", fs.MaxLevel, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
checkErr(f.list("", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
fs.Stats.Checking(remote)
accounting.Stats.Checking(remote)
if oldOnly && last != remote {
if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
@@ -817,7 +910,7 @@ func (f *Fs) purge(oldOnly bool) error {
toBeDeleted <- object
}
last = remote
fs.Stats.DoneChecking(remote)
accounting.Stats.DoneChecking(remote)
}
return nil
}))
@@ -841,8 +934,8 @@ func (f *Fs) CleanUp() error {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashSHA1)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.SHA1)
}
// ------------------------------------------------------------
@@ -866,9 +959,9 @@ func (o *Object) Remote() string {
}
// Hash returns the Sha-1 of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashSHA1 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.SHA1 {
return "", hash.ErrUnsupported
}
if o.sha1 == "" {
// Error is logged in readMetaData
@@ -947,7 +1040,7 @@ func (o *Object) readMetaData() (err error) {
maxSearched = maxVersions
}
var info *api.File
err = o.fs.list("", fs.MaxLevel, baseRemote, maxSearched, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
err = o.fs.list("", true, baseRemote, maxSearched, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
if isDirectory {
return nil
}
@@ -1021,7 +1114,7 @@ type openFile struct {
o *Object // Object we are reading for
resp *http.Response // response of the GET
body io.Reader // reading from here
hash hash.Hash // currently accumulating SHA1
hash gohash.Hash // currently accumulating SHA1
bytes int64 // number of bytes read on this connection
eof bool // whether we have read end of file
}
@@ -1066,7 +1159,7 @@ func (file *openFile) Close() (err error) {
// Check the SHA1
receivedSHA1 := file.o.sha1
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != calculatedSHA1 {
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
}
@@ -1079,10 +1172,9 @@ var _ io.ReadCloser = &openFile{}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
opts := rest.Opts{
Method: "GET",
Absolute: true,
Path: o.fs.info.DownloadURL,
Options: options,
Method: "GET",
RootURL: o.fs.info.DownloadURL,
Options: options,
}
// Download by id if set otherwise by name
if o.id != "" {
@@ -1161,14 +1253,43 @@ func urlEncode(in string) string {
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if *b2Versions {
return errNotWithVersions
}
err = o.fs.Mkdir("")
if err != nil {
return err
}
size := src.Size()
// If a large file, upload it in chunks - see upload.go
if size >= int64(uploadCutoff) {
if size == -1 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
buf := o.fs.getUploadBlock()
n, err := io.ReadFull(in, buf)
if err == nil {
bufReader := bufio.NewReader(in)
in = bufReader
_, err = bufReader.Peek(1)
}
if err == nil {
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(o, in, src)
if err != nil {
o.fs.putUploadBlock(buf)
return err
}
return up.Stream(buf)
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putUploadBlock(buf)
size = int64(n)
in = bytes.NewReader(buf[:n])
} else {
return err
}
} else if size > int64(uploadCutoff) {
up, err := o.fs.newLargeUpload(o, in, src)
if err != nil {
return err
@@ -1177,42 +1298,13 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) {
}
modTime := src.ModTime()
calculatedSha1, _ := src.Hash(fs.HashSHA1)
// If source cannot provide the hash, copy to a temporary file
// and calculate the hash while doing so.
// Then we serve the temporary file.
calculatedSha1, _ := src.Hash(hash.SHA1)
if calculatedSha1 == "" {
// Open a temp file to copy the input
fd, err := ioutil.TempFile("", "rclone-b2-")
if err != nil {
return err
}
_ = os.Remove(fd.Name()) // Delete the file - may not work on Windows
defer func() {
_ = fd.Close() // Ignore error may have been closed already
_ = os.Remove(fd.Name()) // Delete the file - may have been deleted already
}()
// Copy the input while calculating the sha1
hash := sha1.New()
teed := io.TeeReader(in, hash)
n, err := io.Copy(fd, teed)
if err != nil {
return err
}
if n != size {
return errors.Errorf("read %d bytes expecting %d", n, size)
}
calculatedSha1 = fmt.Sprintf("%x", hash.Sum(nil))
// Rewind the temporary file
_, err = fd.Seek(0, 0)
if err != nil {
return err
}
// Set input to temporary file
in = fd
calculatedSha1 = "hex_digits_at_end"
har := newHashAppendingReader(in, sha1.New())
size += int64(har.AdditionalLength())
in = har
}
// Get upload URL
@@ -1279,10 +1371,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) {
// will be returned with the download.
opts := rest.Opts{
Method: "POST",
Absolute: true,
Path: upload.UploadURL,
Body: in,
Method: "POST",
RootURL: upload.UploadURL,
Body: in,
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-File-Name": urlEncode(o.fs.root + o.remote),
@@ -1320,27 +1411,10 @@ func (o *Object) Remove() error {
if *b2Versions {
return errNotWithVersions
}
if *b2HardDelete {
return o.fs.deleteByID(o.id, o.fs.root+o.remote)
}
return o.fs.hide(o.fs.root + o.remote)
}
// MimeType of an Object if known, "" otherwise
@@ -1348,11 +1422,19 @@ func (o *Object) MimeType() string {
return o.mimeType
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.id
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)

backend/b2/b2_test.go Normal file

@@ -0,0 +1,17 @@
// Test B2 filesystem interface
package b2_test
import (
"testing"
"github.com/ncw/rclone/backend/b2"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestB2:",
NilObject: (*b2.Object)(nil),
})
}

backend/b2/upload.go Normal file

@@ -0,0 +1,433 @@
// Upload large files for b2
//
// Docs - https://www.backblaze.com/b2/docs/large_files.html
package b2
import (
"bytes"
"crypto/sha1"
"encoding/hex"
"fmt"
gohash "hash"
"io"
"strings"
"sync"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
)
type hashAppendingReader struct {
h gohash.Hash
in io.Reader
hexSum string
hexReader io.Reader
}
// Read returns all bytes from the original reader, then the hex sum
// of what was read so far, then EOF.
func (har *hashAppendingReader) Read(b []byte) (int, error) {
if har.hexReader == nil {
n, err := har.in.Read(b)
if err == io.EOF {
har.in = nil // allow GC
err = nil // allow reading hexSum before EOF
har.hexSum = hex.EncodeToString(har.h.Sum(nil))
har.hexReader = strings.NewReader(har.hexSum)
}
return n, err
}
return har.hexReader.Read(b)
}
// AdditionalLength returns how many bytes the appended hex sum will take up.
func (har *hashAppendingReader) AdditionalLength() int {
return hex.EncodedLen(har.h.Size())
}
// HexSum returns the hash sum as hex. It's only available after the original
// reader has EOF'd. It's an empty string before that.
func (har *hashAppendingReader) HexSum() string {
return har.hexSum
}
// newHashAppendingReader takes a Reader and a Hash and will append the hex sum
// after the original reader reaches EOF. The increased size depends on the
// given hash, which may be queried through AdditionalLength()
func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
withHash := io.TeeReader(in, h)
return &hashAppendingReader{h: h, in: withHash}
}
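// Illustrative sketch (not part of this file): wrapping a reader appends
// the hex SHA1 after the payload, which is what the "hex_digits_at_end"
// upload mode in b2.go relies on.
//
//	har := newHashAppendingReader(strings.NewReader("payload"), sha1.New())
//	data, _ := ioutil.ReadAll(har) // "payload" followed by 40 hex digits
//	sum := har.HexSum()            // the same 40 hex digits, set after EOF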
// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
f *Fs // parent Fs
o *Object // object being uploaded
in io.Reader // read the data from here
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
}
// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := int64(0)
sha1SliceSize := int64(maxParts)
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
} else {
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
if parts > maxParts {
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
modTime := src.ModTime()
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
}
bucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: o.fs.root + remote,
ContentType: fs.MimeType(src),
Info: map[string]string{
timeKey: timeString(modTime),
},
}
// Set the SHA1 if known
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
up = &largeUpload{
f: f,
o: o,
in: in,
wrap: wrap,
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, sha1SliceSize),
}
return up, nil
}
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
}
return upload, nil
}
// returnUploadURL returns the UploadURL to the cache
func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
if upload == nil {
return
}
up.uploadMu.Lock()
up.uploads = append(up.uploads, upload)
up.uploadMu.Unlock()
}
// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
up.uploadMu.Lock()
up.uploads = nil
up.uploadMu.Unlock()
}
// Transfer a chunk
func (up *largeUpload) transferChunk(part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
upload, err := up.getUploadURL()
if err != nil {
return false, err
}
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
size := int64(len(body)) + int64(in.AdditionalLength())
// Authorization
//
// An upload authorization token, from b2_get_upload_part_url.
//
// X-Bz-Part-Number
//
// A number from 1 to 10000. The parts uploaded for one file
// must have contiguous numbers, starting with 1.
//
// Content-Length
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
// the last one is 100MB.
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
Method: "POST",
RootURL: upload.UploadURL,
Body: up.wrap(in),
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
sha1Header: "hex_digits_at_end",
},
ContentLength: &size,
}
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(&opts, nil, &response)
retry, err := up.f.shouldRetry(resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
// On retryable error clear PartUploadURL
if retry {
fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err)
upload = nil
}
up.returnUploadURL(upload)
up.sha1s[part-1] = in.HexSum()
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else {
fs.Debugf(up.o, "Done sending chunk %d", part)
}
return err
}
// finish closes off the large upload
func (up *largeUpload) finish() error {
fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
opts := rest.Opts{
Method: "POST",
Path: "/b2_finish_large_file",
}
var request = api.FinishLargeFileRequest{
ID: up.id,
SHA1s: up.sha1s,
}
var response api.FileInfo
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return err
}
return up.o.decodeMetaDataFileInfo(&response)
}
// cancel aborts the large upload
func (up *largeUpload) cancel() error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_cancel_large_file",
}
var request = api.CancelLargeFileRequest{
ID: up.id,
}
var response api.CancelLargeFileResponse
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
return err
}
func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
wg.Add(1)
go func(part int64, buf []byte) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(part, buf)
if err != nil {
select {
case errs <- err:
default:
}
}
}(part, buf)
}
func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel()
if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
}
return up.finish()
}
// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
errs := make(chan error, 1)
hasMoreParts := true
var wg sync.WaitGroup
// Transfer initial chunk
up.size = int64(len(initialUploadBlock))
up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
outer:
for part := int64(2); hasMoreParts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
// Get a block of memory
buf := up.f.getUploadBlock()
// Read the chunk
var n int
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
err = nil
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putUploadBlock(buf)
err = nil
break outer
} else if err != nil {
// other kinds of errors indicate failure
up.f.putUploadBlock(buf)
break outer
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
break outer
}
// Transfer the chunk
up.managedTransferChunk(&wg, errs, part, buf)
}
wg.Wait()
up.sha1s = up.sha1s[:up.parts]
return up.finishOrCancelOnError(err, errs)
}
// Upload uploads the chunks from the input
func (up *largeUpload) Upload() error {
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size
errs := make(chan error, 1)
var wg sync.WaitGroup
var err error
outer:
for part := int64(1); part <= up.parts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
reqSize := remaining
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
// Get a block of memory
buf := up.f.getUploadBlock()[:reqSize]
// Read the chunk
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putUploadBlock(buf)
break outer
}
// Transfer the chunk
up.managedTransferChunk(&wg, errs, part, buf)
remaining -= reqSize
}
wg.Wait()
return up.finishOrCancelOnError(err, errs)
}
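// Flow sketch (illustrative): Update in b2.go drives this type - a known
// size goes through Upload, an unknown (streamed) size through Stream
// with the first chunk read up front:
//
//	up, err := o.fs.newLargeUpload(o, in, src)
//	if err == nil {
//		err = up.Upload() // or up.Stream(firstChunk) when size == -1
//	}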

backend/box/api/types.go Normal file

@@ -0,0 +1,192 @@
// Package api has type definitions for box
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api
import (
"encoding/json"
"fmt"
"time"
)
const (
// 2017-05-03T07:26:10-07:00
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the
// box API, by using RFC3339
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).Format(timeFormat)
return []byte(timeString), nil
}
// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
newT, err := time.Parse(timeFormat, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
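// Illustrative round trip (using the example timestamp above):
//
//	var bt Time
//	_ = bt.UnmarshalJSON([]byte(`"2017-05-03T07:26:10-07:00"`))
//	out, _ := bt.MarshalJSON() // `"2017-05-03T07:26:10-07:00"` again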
// Error is returned from box when things go wrong
type Error struct {
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
if e.Message != "" {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
}
return out
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"
// Types of things in Item
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
ItemStatusActive = "active"
ItemStatusTrashed = "trashed"
ItemStatusDeleted = "deleted"
)
// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
}
// ModTime returns the modification time of the item
func (i *Item) ModTime() (t time.Time) {
t = time.Time(i.ContentModifiedAt)
if t.IsZero() {
t = time.Time(i.ModifiedAt)
}
return t
}
// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
} `json:"order"`
}
// Parent defines the ID of the parent directory
type Parent struct {
ID string `json:"id"`
}
// CreateFolder is the request for Create Folder
type CreateFolder struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
}
// UploadFile is the request for Upload File
type UploadFile struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
}
// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
ContentModifiedAt Time `json:"content_modified_at"`
}
// UpdateFileMove is the request for Update File Info to change name and parent
type UpdateFileMove struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
}
// CopyFile is the request for Copy File
type CopyFile struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
}
// UploadSessionRequest is used in Create Upload Session
type UploadSessionRequest struct {
FolderID string `json:"folder_id,omitempty"` // don't pass for update
FileSize int64 `json:"file_size"`
FileName string `json:"file_name,omitempty"` // optional for update
}
// UploadSessionResponse is returned from Create Upload Session
type UploadSessionResponse struct {
TotalParts int `json:"total_parts"`
PartSize int64 `json:"part_size"`
SessionEndpoints struct {
ListParts string `json:"list_parts"`
Commit string `json:"commit"`
UploadPart string `json:"upload_part"`
Status string `json:"status"`
Abort string `json:"abort"`
} `json:"session_endpoints"`
SessionExpiresAt Time `json:"session_expires_at"`
ID string `json:"id"`
Type string `json:"type"`
NumPartsProcessed int `json:"num_parts_processed"`
}
// Part defines the return from the upload part call, which is also passed to commit upload
type Part struct {
PartID string `json:"part_id"`
Offset int `json:"offset"`
Size int `json:"size"`
Sha1 string `json:"sha1"`
}
// UploadPartResponse is returned from the upload part call
type UploadPartResponse struct {
Part Part `json:"part"`
}
// CommitUpload is used in the Commit Upload call
type CommitUpload struct {
Parts []Part `json:"parts"`
Attributes struct {
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
} `json:"attributes"`
}
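The quoted timeFormat above means Marshal and Unmarshal operate on the JSON string including its surrounding quotes, so no separate quoting step is needed. A standalone round-trip sketch using a local copy of the same technique (value receiver here, unlike the pointer receiver above):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

const timeFormat = `"` + time.RFC3339 + `"` // the quotes are literal layout characters

// Time mirrors the api.Time technique above.
type Time time.Time

// MarshalJSON formats straight into a quoted JSON string.
func (t Time) MarshalJSON() ([]byte, error) {
	return []byte(time.Time(t).Format(timeFormat)), nil
}

// UnmarshalJSON parses the quoted JSON string back into a Time.
func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

func main() {
	v := struct {
		ModifiedAt Time `json:"modified_at"`
	}{Time(time.Date(2017, 5, 3, 7, 26, 10, 0, time.UTC))}
	out, err := json.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"modified_at":"2017-05-03T07:26:10Z"}
	var back struct {
		ModifiedAt Time `json:"modified_at"`
	}
	if err := json.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(back.ModifiedAt).Equal(time.Time(v.ModifiedAt))) // true
}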

1067
backend/box/box.go Normal file

File diff suppressed because it is too large

17
backend/box/box_test.go Normal file

@@ -0,0 +1,17 @@
// Test Box filesystem interface
package box_test
import (
"testing"
"github.com/ncw/rclone/backend/box"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestBox:",
NilObject: (*box.Object)(nil),
})
}

273
backend/box/upload.go Normal file

@@ -0,0 +1,273 @@
// multipart upload for box
package box
import (
"bytes"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"sync"
"time"
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
)
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions",
RootURL: uploadURL,
}
request := api.UploadSessionRequest{
FileSize: size,
}
// If the object has an ID then it already exists, so create a new version
if o.id != "" {
opts.Path = "/files/" + o.id + "/upload_sessions"
} else {
opts.Path = "/files/upload_sessions"
request.FolderID = directoryID
request.FileName = replaceReservedChars(leaf)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
return shouldRetry(resp, err)
})
return
}
// sha1Digest produces a digest using sha1 as per RFC3230
func sha1Digest(digest []byte) string {
return "sha=" + base64.StdEncoding.EncodeToString(digest)
}
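For reference, the value sha1Digest produces is the RFC 3230 form: the literal "sha=" followed by the base64 of the raw SHA-1 digest of the chunk body. In isolation (standalone sketch):

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	chunk := []byte("example chunk body")
	sum := sha1.Sum(chunk)
	// RFC 3230: "sha=" plus the base64 of the raw SHA-1 digest
	digest := "sha=" + base64.StdEncoding.EncodeToString(sum[:])
	fmt.Println(digest)
}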
// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
Method: "PUT",
Path: "/files/upload_sessions/" + SessionID,
RootURL: uploadURL,
ContentType: "application/octet-stream",
ContentLength: &chunkSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum[:]),
},
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk))
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return response, nil
}
// commitUpload finishes an upload session
func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions/" + SessionID + "/commit",
RootURL: uploadURL,
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum),
},
}
request := api.CommitUpload{
Parts: parts,
}
request.Attributes.ContentModifiedAt = api.Time(modTime)
request.Attributes.ContentCreatedAt = api.Time(modTime)
var body []byte
var resp *http.Response
maxTries := fs.Config.LowLevelRetries
const defaultDelay = 10
var tries int
outer:
for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
if err != nil {
return shouldRetry(resp, err)
}
body, err = rest.ReadBody(resp)
return shouldRetry(resp, err)
})
delay := defaultDelay
why := "unknown"
if err != nil {
// Sometimes we get 400 Error with
// parts_mismatch immediately after uploading
// the last part. Ignore this error and wait.
if boxErr, ok := err.(*api.Error); ok && boxErr.Code == "parts_mismatch" {
why = err.Error()
} else {
return nil, err
}
} else {
switch resp.StatusCode {
case http.StatusOK, http.StatusCreated:
break outer
case http.StatusAccepted:
why = "not ready yet"
delayString := resp.Header.Get("Retry-After")
if delayString != "" {
delay, err = strconv.Atoi(delayString)
if err != nil {
fs.Debugf(o, "Couldn't decode Retry-After header %q: %v", delayString, err)
delay = defaultDelay
}
}
default:
return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
}
}
fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
time.Sleep(time.Duration(delay) * time.Second)
}
if tries >= maxTries {
return nil, errors.New("too many tries to commit multipart upload - increase --low-level-retries")
}
err = json.Unmarshal(body, &result)
if err != nil {
return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
}
return result, nil
}
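The commit loop above honours the server's Retry-After header when a 202 Accepted is returned, falling back to the fixed default when the header is missing or unparseable. That header handling, extracted into a hypothetical retryDelay helper:

package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// retryDelay returns the number of seconds to wait before retrying,
// preferring the server's Retry-After header over the fallback.
func retryDelay(resp *http.Response, fallback int) int {
	if s := resp.Header.Get("Retry-After"); s != "" {
		if d, err := strconv.Atoi(s); err == nil {
			return d
		}
	}
	return fallback
}

func main() {
	resp := &http.Response{Header: http.Header{"Retry-After": []string{"7"}}}
	fmt.Println(retryDelay(resp, 10)) // 7
	resp.Header.Del("Retry-After")
	fmt.Println(retryDelay(resp, 10)) // 10
}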
// abortUpload cancels an upload session
func (o *Object) abortUpload(SessionID string) (err error) {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/upload_sessions/" + SessionID,
RootURL: uploadURL,
NoResponse: true,
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
return err
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session
session, err := o.createUploadSession(leaf, directoryID, size)
if err != nil {
return errors.Wrap(err, "multipart upload create session failed")
}
chunkSize := session.PartSize
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
// Cancel the session if something went wrong
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
}
}
}()
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
// Upload the chunks
remaining := size
position := int64(0)
parts := make([]api.Part, session.TotalParts)
hash := sha1.New()
errs := make(chan error, 1)
var wg sync.WaitGroup
outer:
for part := 0; part < session.TotalParts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
reqSize := remaining
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
// Make a block of memory
buf := make([]byte, reqSize)
// Read the chunk
_, err = io.ReadFull(in, buf)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to read source")
break outer
}
// Make the global hash (must be done sequentially)
_, _ = hash.Write(buf)
// Transfer the chunk
wg.Add(1)
o.fs.uploadToken.Get()
go func(part int, position int64) {
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
case errs <- err:
default:
}
return
}
parts[part] = partResponse.Part
}(part, position)
// ready for next block
remaining -= chunkSize
position += chunkSize
}
wg.Wait()
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
return err
}
// Finalise the upload session
result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
return errors.Errorf("multipart upload failed %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}
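The o.fs.uploadToken used above acts as a counting semaphore that bounds how many parts upload concurrently. A buffered channel gives the same behaviour; this sketch shows the shape (token is an illustrative name, not the backend's type):

package main

import (
	"fmt"
	"sync"
)

// token is a counting semaphore: Get blocks once cap(t) holders exist.
type token chan struct{}

func newToken(n int) token { return make(token, n) }
func (t token) Get()       { t <- struct{}{} }
func (t token) Put()       { <-t }

func main() {
	tok := newToken(2) // at most 2 parts upload at once
	var wg sync.WaitGroup
	for part := 0; part < 5; part++ {
		wg.Add(1)
		tok.Get() // acquired in the loop, like uploadToken.Get above
		go func(part int) {
			defer wg.Done()
			defer tok.Put()
			fmt.Println("uploading part", part)
		}(part)
	}
	wg.Wait()
}

Acquiring the token in the loop rather than in the goroutine means the reader stalls once the limit is reached, so at most a bounded number of chunk buffers are in flight.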

1574
backend/cache/cache.go vendored Normal file

File diff suppressed because it is too large

1707
backend/cache/cache_internal_test.go vendored Normal file

File diff suppressed because it is too large

78
backend/cache/cache_mount_unix_test.go vendored Normal file

@@ -0,0 +1,78 @@
// +build !plan9,!windows
package cache_test
import (
"os"
"testing"
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)
func (r *run) mountFs(t *testing.T, f fs.Fs) {
device := f.Name() + ":" + f.Root()
var options = []fuse.MountOption{
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)
c, err := fuse.Mount(r.mntDir, options...)
require.NoError(t, err)
filesys := mount.NewFS(f)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr
}
r.unmountRes <- err
}()
// check if the mount process has an error to report
<-c.Ready
require.NoError(t, c.MountError)
r.unmountFn = func() error {
// Shutdown the VFS
filesys.VFS.Shutdown()
return fuse.Unmount(r.mntDir)
}
r.vfs = filesys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}

124
backend/cache/cache_mount_windows_test.go vendored Normal file

@@ -0,0 +1,124 @@
// +build windows
package cache_test
import (
"fmt"
"os"
"testing"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/ncw/rclone/cmd/cmount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
const totalWait = 10 * time.Second
const individualWait = 10 * time.Millisecond
for i := 0; i < int(totalWait/individualWait); i++ {
ok = fn()
if ok {
return ok
}
time.Sleep(individualWait)
}
return false
}
func (r *run) mountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
device := f.Name() + ":" + f.Root()
options := []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
"-o", "uid=-1",
"-o", "gid=-1",
"-o", "allow_other",
// This causes FUSE to supply O_TRUNC with the Open
// call which is more efficient for cmount. However
// it does not work with cgofuse on Windows with
// WinFSP so cmount must work with or without it.
"-o", "atomic_o_trunc",
"--FileSystemName=rclone",
}
fsys := cmount.NewFS(f)
host := fuse.NewFileSystemHost(fsys)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
var err error
ok := host.Mount(r.mntDir, options)
if !ok {
err = errors.New("mount failed")
}
r.unmountRes <- err
}()
// unmount
r.unmountFn = func() error {
// Shutdown the VFS
fsys.VFS.Shutdown()
if host.Unmount() {
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err != nil
}) {
t.Fatalf("mountpoint %q didn't disappear after unmount", r.mntDir)
}
return nil
}
return errors.New("host unmount failed")
}
// Wait for the filesystem to become ready, checking the file
// system didn't blow up before starting
select {
case err := <-r.unmountRes:
require.NoError(t, err)
case <-time.After(time.Second * 3):
}
// Wait for the mount point to be available on Windows
// On Windows the Init signal comes slightly before the mount is ready
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err == nil
}) {
t.Errorf("mountpoint %q didn't become available on mount", r.mntDir)
}
r.vfs = fsys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}

21
backend/cache/cache_test.go vendored Normal file

@@ -0,0 +1,21 @@
// Test Cache filesystem interface
// +build !plan9
package cache_test
import (
"testing"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
})
}

6
backend/cache/cache_unsupported.go vendored Normal file

@@ -0,0 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files"
// +build plan9
package cache

455
backend/cache/cache_upload_test.go vendored Normal file

@@ -0,0 +1,455 @@
// +build !plan9
package cache_test
import (
"math/rand"
"os"
"path"
"strconv"
"testing"
"time"
"fmt"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
}
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
// create some rand test data
testSize := int64(524288000)
testReader := runInstance.randomReader(t, testSize)
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(524416032), ti.Size())
} else {
require.Equal(t, testSize, ti.Size())
}
de1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, de1, 1)
runInstance.completeBackgroundUpload(t, "one", bu)
// check if it was removed from temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
// check if it can be read
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
require.NoError(t, err)
require.Len(t, data2, 1024)
}
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(10485760)
testReader := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
time.Sleep(time.Second * 5)
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
//require.NoError(t, err)
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
require.NoError(t, err)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second/test")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(1048576)
testReader := runInstance.randomReader(t, testSize)
testReader2 := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
require.False(t, os.IsNotExist(err))
runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
require.True(t, os.IsNotExist(err))
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
totalFiles := 10
rand.Seed(time.Now().Unix())
lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
require.NoError(t, err)
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
if runInstance.wrappedIsExternal && i < totalFiles-1 {
time.Sleep(time.Second * 3)
}
lastFile = remote
}
// check if cache lists all files, likely temp upload didn't finish yet
de1, err := runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
// wait for background uploader to do its thing
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
// retry until we have no more temp files and fail if they don't go down to 0
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
require.True(t, os.IsNotExist(err))
// check if cache lists all files
de1, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
}
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test DirMove - allowed
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("second/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.NoError(t, err)
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.Error(t, err)
var started bool
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
require.NoError(t, err)
require.False(t, started)
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Rmdir - allowed
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.False(t, started)
require.NoError(t, err)
// test Move/Rename -- allowed
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.NoError(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/second")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update -- allowed
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err)
obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2))
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(67), tmpInfo.Size())
} else {
require.Equal(t, int64(len(data2)), tmpInfo.Size())
}
// test SetModTime -- allowed
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
require.NotEqual(t, secondModTime, firstModTime)
require.NotEqual(t, time.Time{}, firstModTime)
require.NotEqual(t, time.Time{}, secondModTime)
}
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
require.NoError(t, err)
// test DirMove
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.Error(t, err)
}
// test Rmdir
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test Move/Rename
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.Error(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/second")
require.Error(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.Error(t, err)
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove
err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update - this seems to work. Why? FIXME
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
// require.Equal(t, "one content", string(data2))
//
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
// require.NoError(t, err)
// if runInstance.rootIsCrypt {
// require.Equal(t, int64(67), tmpInfo.Size())
// } else {
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
// }
//})
//require.Error(t, err)
// test SetModTime -- seems to work because of the previous
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//require.Equal(t, secondModTime, firstModTime)
//require.NotEqual(t, time.Time{}, firstModTime)
//require.NotEqual(t, time.Time{}, secondModTime)
}

130
backend/cache/directory.go vendored Normal file

@@ -0,0 +1,130 @@
// +build !plan9
package cache
import (
"time"
"path"
"github.com/ncw/rclone/fs"
)
// Directory is a generic dir that stores basic information about it
type Directory struct {
fs.Directory `json:"-"`
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the directory
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheItems int64 `json:"items"` // number of objects or -1 for unknown
CacheType string `json:"cacheType"` // object type
CacheTs *time.Time `json:",omitempty"`
}
// NewDirectory builds an empty dir which will be used to unmarshal data into
func NewDirectory(f *Fs, remote string) *Directory {
cd := ShallowDirectory(f, remote)
t := time.Now()
cd.CacheTs = &t
return cd
}
// ShallowDirectory builds an empty dir which will be used to unmarshal data into
func ShallowDirectory(f *Fs, remote string) *Directory {
var cd *Directory
fullRemote := cleanPath(path.Join(f.Root(), remote))
// build a new one
dir := cleanPath(path.Dir(fullRemote))
name := cleanPath(path.Base(fullRemote))
cd = &Directory{
CacheFs: f,
Name: name,
Dir: dir,
CacheModTime: time.Now().UnixNano(),
CacheSize: 0,
CacheItems: 0,
CacheType: "Directory",
}
return cd
}
// DirectoryFromOriginal builds one from a generic fs.Directory
func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
var cd *Directory
fullRemote := path.Join(f.Root(), d.Remote())
dir := cleanPath(path.Dir(fullRemote))
name := cleanPath(path.Base(fullRemote))
t := time.Now()
cd = &Directory{
Directory: d,
CacheFs: f,
Name: name,
Dir: dir,
CacheModTime: d.ModTime().UnixNano(),
CacheSize: d.Size(),
CacheItems: d.Items(),
CacheType: "Directory",
CacheTs: &t,
}
return cd
}
// Fs returns its FS info
func (d *Directory) Fs() fs.Info {
return d.CacheFs
}
// String returns a human friendly name for this directory
func (d *Directory) String() string {
if d == nil {
return "<nil>"
}
return d.Remote()
}
// Remote returns the remote path
func (d *Directory) Remote() string {
return d.CacheFs.cleanRootFromPath(d.abs())
}
// abs returns the absolute path to the dir
func (d *Directory) abs() string {
return cleanPath(path.Join(d.Dir, d.Name))
}
// parentRemote returns the absolute path parent remote
func (d *Directory) parentRemote() string {
absPath := d.abs()
if absPath == "" {
return ""
}
return cleanPath(path.Dir(absPath))
}
// ModTime returns the cached ModTime
func (d *Directory) ModTime() time.Time {
return time.Unix(0, d.CacheModTime)
}
// Size returns the cached Size
func (d *Directory) Size() int64 {
return d.CacheSize
}
// Items returns the cached Items
func (d *Directory) Items() int64 {
return d.CacheItems
}
var (
_ fs.Directory = (*Directory)(nil)
)
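Only the exported Cache* fields carrying JSON tags are persisted; the embedded fs.Directory and the CacheFs pointer are excluded with json:"-". A small round-trip sketch of that selective-marshalling technique (dirEntry is an illustrative type, not the cache backend's):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// dirEntry shows the tagging technique: runtime-only fields are
// excluded from persistence with json:"-", like Directory's CacheFs.
type dirEntry struct {
	Parent  *dirEntry `json:"-"`       // runtime link, never persisted
	Name    string    `json:"name"`    // persisted
	ModTime int64     `json:"modTime"` // UnixNano, like CacheModTime
}

func main() {
	d := dirEntry{Name: "photos", ModTime: time.Now().UnixNano()}
	b, err := json.Marshal(&d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"name":"photos","modTime":...}
	var back dirEntry
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Name, back.Parent == nil) // photos true
}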

668
backend/cache/handle.go vendored Normal file

@@ -0,0 +1,668 @@
// +build !plan9
package cache
import (
"fmt"
"io"
"sync"
"time"
"path"
"runtime"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/pkg/errors"
)
var uploaderMap = make(map[string]*backgroundWriter)
var uploaderMapMx sync.Mutex
// initBackgroundUploader returns a single instance
func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
// write lock to create one
uploaderMapMx.Lock()
defer uploaderMapMx.Unlock()
if b, ok := uploaderMap[fs.String()]; ok {
// if it was already started we close it so that it can be started again
if b.running {
b.close()
} else {
return b, nil
}
}
bb := newBackgroundWriter(fs)
uploaderMap[fs.String()] = bb
return uploaderMap[fs.String()], nil
}
// Handle is managing the read/write/seek operations on an open handle
type Handle struct {
cachedObject *Object
cfs *Fs
memory *Memory
preloadQueue chan int64
preloadOffset int64
offset int64
seenOffsets map[int64]bool
mu sync.Mutex
confirmReading chan bool
UseMemory bool
workers []*worker
closed bool
reading bool
}
// NewObjectHandle returns a new Handle for an existing Object
func NewObjectHandle(o *Object, cfs *Fs) *Handle {
r := &Handle{
cachedObject: o,
cfs: cfs,
offset: 0,
preloadOffset: -1, // -1 to trigger the first preload
UseMemory: cfs.chunkMemory,
reading: false,
}
r.seenOffsets = make(map[int64]bool)
r.memory = NewMemory(-1)
// create a larger buffer to queue up requests
r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10)
r.confirmReading = make(chan bool)
r.startReadWorkers()
return r
}
// cacheFs is a convenience method to get the parent cache FS of the object's manager
func (r *Handle) cacheFs() *Fs {
return r.cfs
}
// storage is a convenience method to get the persistent storage of the object's manager
func (r *Handle) storage() *Persistent {
return r.cacheFs().cache
}
// String representation of this reader
func (r *Handle) String() string {
return r.cachedObject.abs()
}
// startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() {
if r.hasAtLeastOneWorker() {
return
}
totalWorkers := r.cacheFs().totalWorkers
if r.cacheFs().plexConnector.isConfigured() {
if !r.cacheFs().plexConnector.isConnected() {
err := r.cacheFs().plexConnector.authenticate()
if err != nil {
fs.Errorf(r, "failed to authenticate to Plex: %v", err)
}
}
if r.cacheFs().plexConnector.isConnected() {
totalWorkers = 1
}
}
r.scaleWorkers(totalWorkers)
}
// scaleWorkers adjusts the worker pool to the desired count, scaling in or out as needed
func (r *Handle) scaleWorkers(desired int) {
current := len(r.workers)
if current == desired {
return
}
if current > desired {
// scale in gracefully
for i := 0; i < current-desired; i++ {
r.preloadQueue <- -1
}
} else {
// scale out
for i := 0; i < desired-current; i++ {
w := &worker{
r: r,
ch: r.preloadQueue,
id: current + i,
}
go w.run()
r.workers = append(r.workers, w)
}
}
// ignore first scale out from 0
if current != 0 {
fs.Debugf(r, "scale workers to %v", desired)
}
}
func (r *Handle) confirmExternalReading() {
// if we've already scaled out, or Plex isn't configured,
// then we skip this step
if len(r.workers) > 1 ||
!r.cacheFs().plexConnector.isConfigured() {
return
}
if !r.cacheFs().plexConnector.isPlaying(r.cachedObject) {
return
}
fs.Infof(r, "confirmed reading by external reader")
r.scaleWorkers(r.cacheFs().totalMaxWorkers)
}
// queueOffset will send an offset to the workers if it's different from the last one
func (r *Handle) queueOffset(offset int64) {
if offset != r.preloadOffset {
// clean past in-memory chunks
if r.UseMemory {
go r.memory.CleanChunksByNeed(offset)
}
r.confirmExternalReading()
r.preloadOffset = offset
// clear the past seen chunks
// they will remain in our persistent storage but will be removed from transient
// so they need to be picked up by a worker
for k := range r.seenOffsets {
if k < offset {
r.seenOffsets[k] = false
}
}
for i := 0; i < len(r.workers); i++ {
o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
}
if v, ok := r.seenOffsets[o]; ok && v {
continue
}
r.seenOffsets[o] = true
r.preloadQueue <- o
}
}
}
func (r *Handle) hasAtLeastOneWorker() bool {
oneWorker := false
for i := 0; i < len(r.workers); i++ {
if r.workers[i].isRunning() {
oneWorker = true
}
}
return oneWorker
}
// getChunk is called by the FS to retrieve a specific chunk of known start and size
// it can be served from either the transient (memory) or the persistent cache
// it aligns the request to the cache's chunk boundaries and slices the final desired chunk out of the stored data
func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
var data []byte
var err error
// we calculate the modulus of the requested offset with the size of a chunk
offset := chunkStart % r.cacheFs().chunkSize
// we align the start offset of the first chunk to a likely chunk in the storage
chunkStart = chunkStart - offset
r.queueOffset(chunkStart)
found := false
if r.UseMemory {
data, err = r.memory.GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
}
}
if !found {
// we're going to give the workers a chance to pick up the chunk
// and retry a couple of times
for i := 0; i < r.cacheFs().readRetries*8; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
break
}
fs.Debugf(r, "%v: chunk retry storage: %v", chunkStart, i)
time.Sleep(time.Millisecond * 500)
}
}
// not found in RAM, or the worker didn't manage to download
// the chunk in time, so we abort and close the stream
if err != nil || len(data) == 0 || !found {
if !r.hasAtLeastOneWorker() {
fs.Errorf(r, "out of workers")
return nil, io.ErrUnexpectedEOF
}
return nil, errors.Errorf("chunk not found %v", chunkStart)
}
// first chunk will be aligned with the start
if offset > 0 {
if offset > int64(len(data)) {
fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
return nil, io.ErrUnexpectedEOF
}
data = data[int(offset):]
}
return data, nil
}
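The offset arithmetic at the top of getChunk aligns an arbitrary read position to the chunk grid: the remainder locates the position inside a stored chunk, and subtracting it yields the chunk's aligned start. In isolation:

package main

import "fmt"

const chunkSize = int64(5 * 1024 * 1024) // 5 MiB, an example grid

func main() {
	readPos := int64(12_582_912)    // arbitrary read offset (12 MiB)
	offset := readPos % chunkSize   // position inside the stored chunk
	chunkStart := readPos - offset  // aligned chunk boundary to fetch
	fmt.Println(chunkStart, offset) // 10485760 2097152
}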
// Read reads up to len(p) bytes into p from the chunk at the current offset
func (r *Handle) Read(p []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
var buf []byte
// first reading
if !r.reading {
r.reading = true
}
// reached EOF
if r.offset >= r.cachedObject.Size() {
return 0, io.EOF
}
currentOffset := r.offset
buf, err = r.getChunk(currentOffset)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(r, "(%v/%v) error (%v) response", currentOffset, r.cachedObject.Size(), err)
}
if len(buf) == 0 && err != io.ErrUnexpectedEOF {
return 0, io.EOF
}
readSize := copy(p, buf)
newOffset := currentOffset + int64(readSize)
r.offset = newOffset
return readSize, err
}
// Close will tell the workers to stop
func (r *Handle) Close() error {
r.mu.Lock()
defer r.mu.Unlock()
if r.closed {
return errors.New("file already closed")
}
close(r.preloadQueue)
r.closed = true
// wait for workers to complete their jobs before returning
waitCount := 3
for i := 0; i < len(r.workers); i++ {
waitIdx := 0
for r.workers[i].isRunning() && waitIdx < waitCount {
time.Sleep(time.Second)
waitIdx++
}
}
r.memory.db.Flush()
fs.Debugf(r, "cache reader closed %v", r.offset)
return nil
}
// Seek will move the current offset based on whence and instruct the workers to move there too
func (r *Handle) Seek(offset int64, whence int) (int64, error) {
r.mu.Lock()
defer r.mu.Unlock()
var err error
switch whence {
case io.SeekStart:
fs.Debugf(r, "moving offset set from %v to %v", r.offset, offset)
r.offset = offset
case io.SeekCurrent:
fs.Debugf(r, "moving offset cur from %v to %v", r.offset, r.offset+offset)
r.offset += offset
case io.SeekEnd:
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
r.offset = r.cachedObject.Size() + offset
default:
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
}
chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
if chunkStart >= r.cacheFs().chunkSize {
chunkStart = chunkStart - r.cacheFs().chunkSize
}
r.queueOffset(chunkStart)
return r.offset, err
}
type worker struct {
r *Handle
ch <-chan int64
rc io.ReadCloser
id int
running bool
mu sync.Mutex
}
// String is a representation of this worker
func (w *worker) String() string {
return fmt.Sprintf("worker-%v <%v>", w.id, w.r.cachedObject.Name)
}
// reader will return a reader depending on the capabilities of the source reader:
// - if it supports seeking it will seek to the desired offset and return the same reader
// - if it doesn't support seeking it will close a possible existing one and open at the desired offset
// - if there's no reader associated with this worker, it will create one
func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error) {
var err error
r := w.rc
if w.rc == nil {
r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
})
if err != nil {
return nil, err
}
return r, nil
}
if !closeOpen {
if do, ok := r.(fs.RangeSeeker); ok {
_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
return r, err
} else if do, ok := r.(io.Seeker); ok {
_, err = do.Seek(offset, io.SeekStart)
return r, err
}
}
_ = w.rc.Close()
return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
if err != nil {
return nil, err
}
return r, nil
})
}
func (w *worker) isRunning() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.running
}
func (w *worker) setRunning(f bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.running = f
}
// run is the main loop for the worker which receives offsets to preload
func (w *worker) run() {
var err error
var data []byte
defer w.setRunning(false)
defer func() {
if w.rc != nil {
_ = w.rc.Close()
w.setRunning(false)
}
}()
for {
chunkStart, open := <-w.ch
w.setRunning(true)
if chunkStart < 0 || !open {
break
}
// skip if it exists
if w.r.UseMemory {
if w.r.memory.HasChunk(w.r.cachedObject, chunkStart) {
continue
}
// add it in ram if it's in the persistent storage
data, err = w.r.storage().GetChunk(w.r.cachedObject, chunkStart)
if err == nil {
err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
if err != nil {
fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
} else {
continue
}
}
} else {
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
}
}
chunkEnd := chunkStart + w.r.cacheFs().chunkSize
// TODO: Remove this comment if it proves to be reliable for #1896
//if chunkEnd > w.r.cachedObject.Size() {
// chunkEnd = w.r.cachedObject.Size()
//}
w.download(chunkStart, chunkEnd, 0)
}
}
func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
var err error
var data []byte
// stop retries
if retry >= w.r.cacheFs().readRetries {
return
}
// back-off between retries
if retry > 0 {
time.Sleep(time.Second * time.Duration(retry))
}
closeOpen := false
if retry > 0 {
closeOpen = true
}
w.rc, err = w.reader(chunkStart, chunkEnd, closeOpen)
// we seem to be getting only errors so we abort
if err != nil {
fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(true)
if err != nil {
fs.Errorf(w, "%v", err)
}
w.download(chunkStart, chunkEnd, retry+1)
return
}
data = make([]byte, chunkEnd-chunkStart)
var sourceRead int
sourceRead, err = io.ReadFull(w.rc, data)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(true)
if err != nil {
fs.Errorf(w, "%v", err)
}
w.download(chunkStart, chunkEnd, retry+1)
return
}
data = data[:sourceRead] // reslice to remove extra garbage
if err == io.ErrUnexpectedEOF {
fs.Debugf(w, "partially downloaded chunk %v", fs.SizeSuffix(chunkStart))
} else {
fs.Debugf(w, "downloaded chunk %v", chunkStart)
}
if w.r.UseMemory {
err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
if err != nil {
fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
}
}
err = w.r.storage().AddChunk(w.r.cachedObject.abs(), data, chunkStart)
if err != nil {
fs.Errorf(w, "failed caching chunk in storage %v: %v", chunkStart, err)
}
}
const (
// BackgroundUploadStarted is a state for a temp file that has started upload
BackgroundUploadStarted = iota
// BackgroundUploadCompleted is a state for a temp file that has completed upload
BackgroundUploadCompleted
// BackgroundUploadError is a state for a temp file that has an error upload
BackgroundUploadError
)
// BackgroundUploadState is an entity that maps to an existing file which is stored on the temp fs
type BackgroundUploadState struct {
Remote string
Status int
Error error
}
type backgroundWriter struct {
fs *Fs
stateCh chan int
running bool
notifyCh chan BackgroundUploadState
mu sync.Mutex
}
func newBackgroundWriter(f *Fs) *backgroundWriter {
b := &backgroundWriter{
fs: f,
stateCh: make(chan int),
notifyCh: make(chan BackgroundUploadState),
}
return b
}
func (b *backgroundWriter) close() {
b.stateCh <- 2
b.mu.Lock()
defer b.mu.Unlock()
b.running = false
}
func (b *backgroundWriter) pause() {
b.stateCh <- 1
}
func (b *backgroundWriter) play() {
b.stateCh <- 0
}
func (b *backgroundWriter) isRunning() bool {
b.mu.Lock()
defer b.mu.Unlock()
return b.running
}
func (b *backgroundWriter) notify(remote string, status int, err error) {
state := BackgroundUploadState{
Remote: remote,
Status: status,
Error: err,
}
select {
case b.notifyCh <- state:
fs.Debugf(remote, "notified background upload state: %v", state.Status)
default:
}
}
func (b *backgroundWriter) run() {
state := 0
for {
b.mu.Lock()
b.running = true
b.mu.Unlock()
select {
case s := <-b.stateCh:
state = s
default:
//
}
switch state {
case 1:
runtime.Gosched()
time.Sleep(time.Millisecond * 500)
continue
case 2:
return
}
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait)
if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
time.Sleep(time.Second)
continue
}
remote := b.fs.cleanRootFromPath(absPath)
b.notify(remote, BackgroundUploadStarted, nil)
fs.Infof(remote, "background upload: started upload")
err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
if err != nil {
b.notify(remote, BackgroundUploadError, err)
_ = b.fs.cache.rollbackPendingUpload(absPath)
fs.Errorf(remote, "background upload: %v", err)
continue
}
// clean empty dirs up to root
thisDir := cleanPath(path.Dir(remote))
for thisDir != "" {
thisList, err := b.fs.tempFs.List(thisDir)
if err != nil {
break
}
if len(thisList) > 0 {
break
}
err = b.fs.tempFs.Rmdir(thisDir)
fs.Debugf(thisDir, "cleaned from temp path")
if err != nil {
break
}
thisDir = cleanPath(path.Dir(thisDir))
}
fs.Infof(remote, "background upload: uploaded entry")
err = b.fs.cache.removePendingUpload(absPath)
if err != nil && !strings.Contains(err.Error(), "pending upload not found") {
fs.Errorf(remote, "background upload: %v", err)
}
parentCd := NewDirectory(b.fs, cleanPath(path.Dir(remote)))
err = b.fs.cache.ExpireDir(parentCd)
if err != nil {
fs.Errorf(parentCd, "background upload: cache expire error: %v", err)
}
b.fs.notifyChangeUpstream(remote, fs.EntryObject)
fs.Infof(remote, "finished background upload")
b.notify(remote, BackgroundUploadCompleted, nil)
}
}
// Check the interfaces are satisfied
var (
_ io.ReadCloser = (*Handle)(nil)
_ io.Seeker = (*Handle)(nil)
)
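backgroundWriter.notify above uses a non-blocking send so that uploads never stall when no listener is attached to notifyCh. The idiom in isolation (the Sleep calls are for demonstration only, to let the listener goroutine start):

package main

import (
	"fmt"
	"time"
)

// notify never blocks: if no listener is ready the message is dropped,
// which is what keeps the background uploader from stalling.
func notify(ch chan string, msg string) {
	select {
	case ch <- msg:
		fmt.Println("delivered:", msg)
	default:
		fmt.Println("dropped:", msg)
	}
}

func main() {
	ch := make(chan string)      // unbuffered, like notifyCh above
	notify(ch, "upload started") // dropped: nobody is receiving yet

	go func() { fmt.Println("got:", <-ch) }()
	time.Sleep(50 * time.Millisecond) // demo only: let the listener start
	notify(ch, "upload completed")    // delivered to the waiting listener
	time.Sleep(50 * time.Millisecond) // demo only: let the print happen
}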

358
backend/cache/object.go vendored Normal file

@@ -0,0 +1,358 @@
// +build !plan9
package cache
import (
"io"
"path"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
const (
objectInCache = "Object"
objectPendingUpload = "TempObject"
)
// Object is a generic file like object that stores basic information about it
type Object struct {
fs.Object `json:"-"`
ParentFs fs.Fs `json:"-"` // parent fs
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"`
CacheHashes map[hash.Type]string // all supported hashes cached
refreshMutex sync.Mutex
}
// NewObject builds one from a generic fs.Object
func NewObject(f *Fs, remote string) *Object {
fullRemote := path.Join(f.Root(), remote)
dir, name := path.Split(fullRemote)
cacheType := objectInCache
parentFs := f.UnWrap()
if f.tempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
parentFs = f.tempFs
fs.Debugf(fullRemote, "pending upload found")
}
}
co := &Object{
ParentFs: parentFs,
CacheFs: f,
Name: cleanPath(name),
Dir: cleanPath(dir),
CacheModTime: time.Now().UnixNano(),
CacheSize: 0,
CacheStorable: false,
CacheType: cacheType,
CacheTs: time.Now(),
}
return co
}
// ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
var co *Object
fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
dir, name := path.Split(fullRemote)
cacheType := objectInCache
parentFs := f.UnWrap()
if f.tempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
parentFs = f.tempFs
fs.Debugf(fullRemote, "pending upload found")
}
}
co = &Object{
ParentFs: parentFs,
CacheFs: f,
Name: cleanPath(name),
Dir: cleanPath(dir),
CacheType: cacheType,
CacheTs: time.Now(),
}
co.updateData(o)
return co
}
func (o *Object) updateData(source fs.Object) {
o.Object = source
o.CacheModTime = source.ModTime().UnixNano()
o.CacheSize = source.Size()
o.CacheStorable = source.Storable()
o.CacheTs = time.Now()
o.CacheHashes = make(map[hash.Type]string)
}
// Fs returns its FS info
func (o *Object) Fs() fs.Info {
return o.CacheFs
}
// String returns a human friendly name for this object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
p := path.Join(o.Dir, o.Name)
return o.CacheFs.cleanRootFromPath(p)
}
// abs returns the absolute path to the object
func (o *Object) abs() string {
return path.Join(o.Dir, o.Name)
}
// ModTime returns the cached ModTime
func (o *Object) ModTime() time.Time {
_ = o.refresh()
return time.Unix(0, o.CacheModTime)
}
// Size returns the cached Size
func (o *Object) Size() int64 {
_ = o.refresh()
return o.CacheSize
}
// Storable returns the cached Storable
func (o *Object) Storable() bool {
_ = o.refresh()
return o.CacheStorable
}
// refresh will check if the object info is expired and request the info from source if it is
// all these conditions must be true to ignore a refresh
// 1. cache ts didn't expire yet
// 2. is not pending a notification from the wrapped fs
func (o *Object) refresh() error {
isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
isExpired := time.Now().After(o.CacheTs.Add(o.CacheFs.fileAge))
if !isExpired && !isNotified {
return nil
}
return o.refreshFromSource(true)
}
// refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource(force bool) error {
o.refreshMutex.Lock()
defer o.refreshMutex.Unlock()
var err error
var liveObject fs.Object
if o.Object != nil && !force {
return nil
}
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(o.Remote())
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
} else {
liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
}
if err != nil {
fs.Errorf(o, "error refreshing object: %v", err)
return err
}
o.updateData(liveObject)
o.persist()
return nil
}
// SetModTime sets the ModTime of this object
func (o *Object) SetModTime(t time.Time) error {
if err := o.refreshFromSource(false); err != nil {
return err
}
err := o.Object.SetModTime(t)
if err != nil {
return err
}
o.CacheModTime = t.UnixNano()
o.persist()
fs.Debugf(o, "updated ModTime: %v", t)
return nil
}
// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
if err := o.refreshFromSource(true); err != nil {
return nil, err
}
var err error
cacheReader := NewObjectHandle(o, o.CacheFs)
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
}
_, err = cacheReader.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}
}
return readers.NewLimitedReadCloser(cacheReader, limit), nil
}
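For reference, the offset/limit convention Open uses works as follows; decodeRange below is an illustrative stand-in (not rclone's actual fs.RangeOption.Decode) and the numbers are made up:

package main

import "fmt"

// decodeRange mirrors the convention above: a byte range [start, end]
// (inclusive) against an object of the given size becomes an offset to
// seek to plus a limit in bytes, where limit == -1 means "read to EOF".
func decodeRange(start, end, size int64) (offset, limit int64) {
	offset = start
	if end < 0 || end >= size {
		return offset, -1 // open-ended range
	}
	return offset, end - start + 1
}

func main() {
	offset, limit := decodeRange(1024, 4095, 1<<20)
	fmt.Println(offset, limit) // 1024 3072

	offset, limit = decodeRange(1024, -1, 1<<20)
	fmt.Println(offset, limit) // 1024 -1
}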
// Update will change the object data
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if err := o.refreshFromSource(false); err != nil {
return err
}
// pause background uploads if active
if o.CacheFs.tempWritePath != "" {
o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't update", o)
}
}
fs.Debugf(o, "updating object contents with size %v", src.Size())
// FIXME use reliable upload
err := o.Object.Update(in, src, options...)
if err != nil {
fs.Errorf(o, "error updating source: %v", err)
return err
}
// deleting cached chunks and info to be replaced with new ones
_ = o.CacheFs.cache.RemoveObject(o.abs())
// advertise to ChangeNotify if wrapped doesn't do that
o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
o.CacheModTime = src.ModTime().UnixNano()
o.CacheSize = src.Size()
o.CacheHashes = make(map[hash.Type]string)
o.CacheTs = time.Now()
o.persist()
return nil
}
// Remove deletes the object from both the cache and the source
func (o *Object) Remove() error {
if err := o.refreshFromSource(false); err != nil {
return err
}
// pause background uploads if active
if o.CacheFs.tempWritePath != "" {
o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't delete", o)
}
}
err := o.Object.Remove()
if err != nil {
return err
}
fs.Debugf(o, "removing object")
_ = o.CacheFs.cache.RemoveObject(o.abs())
_ = o.CacheFs.cache.removePendingUpload(o.abs())
parentCd := NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote())))
_ = o.CacheFs.cache.ExpireDir(parentCd)
// advertise to ChangeNotify if wrapped doesn't do that
o.CacheFs.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
return nil
}
// Hash requests a hash of the object and stores in the cache
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ht hash.Type) (string, error) {
_ = o.refresh()
if o.CacheHashes == nil {
o.CacheHashes = make(map[hash.Type]string)
}
cachedHash, found := o.CacheHashes[ht]
if found {
return cachedHash, nil
}
if err := o.refreshFromSource(false); err != nil {
return "", err
}
liveHash, err := o.Object.Hash(ht)
if err != nil {
return "", err
}
o.CacheHashes[ht] = liveHash
o.persist()
fs.Debugf(o, "object hash cached: %v", liveHash)
return liveHash, nil
}
// persist adds this object to the persistent cache
func (o *Object) persist() *Object {
err := o.CacheFs.cache.AddObject(o)
if err != nil {
fs.Errorf(o, "failed to cache object: %v", err)
}
return o
}
func (o *Object) isTempFile() bool {
_, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
if err != nil {
o.CacheType = objectInCache
return false
}
o.CacheType = objectPendingUpload
return true
}
func (o *Object) tempFileStartedUpload() bool {
started, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
if err != nil {
return false
}
return started
}
var (
_ fs.Object = (*Object)(nil)
)
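The trailing var block is Go's compile-time interface check: assigning a typed nil pointer to a blank interface-typed variable makes the build fail if the type ever stops satisfying the interface. A minimal self-contained sketch of the idiom (the names here are made up):

package main

type Storable interface {
	Store() error
}

type Object struct{}

func (*Object) Store() error { return nil }

// Compile-time check: if *Object loses Store, this line stops compiling.
var _ Storable = (*Object)(nil)

func main() {}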

backend/cache/plex.go vendored Normal file

@@ -0,0 +1,282 @@
// +build !plan9
package cache
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"sync"
"bytes"
"io/ioutil"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/patrickmn/go-cache"
"golang.org/x/net/websocket"
)
const (
// defPlexLoginURL is the default URL for Plex login
defPlexLoginURL = "https://plex.tv/users/sign_in.json"
defPlexNotificationURL = "%s/:/websockets/notifications?X-Plex-Token=%s"
)
// PlaySessionStateNotification is part of the API response of Plex
type PlaySessionStateNotification struct {
SessionKey string `json:"sessionKey"`
GUID string `json:"guid"`
Key string `json:"key"`
ViewOffset int64 `json:"viewOffset"`
State string `json:"state"`
TranscodeSession string `json:"transcodeSession"`
}
// NotificationContainer is part of the API response of Plex
type NotificationContainer struct {
Type string `json:"type"`
Size int `json:"size"`
PlaySessionState []PlaySessionStateNotification `json:"PlaySessionStateNotification"`
}
// PlexNotification is part of the API response of Plex
type PlexNotification struct {
Container NotificationContainer `json:"NotificationContainer"`
}
// plexConnector manages the cache integration with Plex
type plexConnector struct {
url *url.URL
username string
password string
token string
f *Fs
mu sync.Mutex
running bool
runningMu sync.Mutex
stateCache *cache.Cache
}
// newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
}
pc := &plexConnector{
f: f,
url: u,
username: username,
password: password,
token: "",
stateCache: cache.New(time.Hour, time.Minute),
}
return pc, nil
}
// newPlexConnectorWithToken connects to a Plex server using an existing token
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
}
pc := &plexConnector{
f: f,
url: u,
token: token,
stateCache: cache.New(time.Hour, time.Minute),
}
pc.listenWebsocket()
return pc, nil
}
func (p *plexConnector) closeWebsocket() {
p.runningMu.Lock()
defer p.runningMu.Unlock()
fs.Infof("plex", "stopped Plex watcher")
p.running = false
}
func (p *plexConnector) listenWebsocket() {
p.runningMu.Lock()
defer p.runningMu.Unlock()
u := strings.Replace(p.url.String(), "http://", "ws://", 1)
u = strings.Replace(u, "https://", "wss://", 1)
conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
"", "http://localhost")
if err != nil {
fs.Errorf("plex", "%v", err)
return
}
p.running = true
go func() {
for {
if !p.isConnected() {
break
}
notif := &PlexNotification{}
err := websocket.JSON.Receive(conn, notif)
if err != nil {
fs.Debugf("plex", "%v", err)
p.closeWebsocket()
break
}
// we're only interested in play events
if notif.Container.Type == "playing" {
// we loop through each of them
for _, v := range notif.Container.PlaySessionState {
// event type of playing
if v.State == "playing" {
// if it's not cached get the details and cache them
if _, found := p.stateCache.Get(v.Key); !found {
req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), v.Key), nil)
if err != nil {
continue
}
p.fillDefaultHeaders(req)
resp, err := http.DefaultClient.Do(req)
if err != nil {
continue
}
var data []byte
data, err = ioutil.ReadAll(resp.Body)
if err != nil {
continue
}
p.stateCache.Set(v.Key, data, cache.DefaultExpiration)
}
} else if v.State == "stopped" {
p.stateCache.Delete(v.Key)
}
}
}
}
}()
}
// fillDefaultHeaders will add common headers to requests
func (p *plexConnector) fillDefaultHeaders(req *http.Request) {
req.Header.Add("X-Plex-Client-Identifier", fmt.Sprintf("rclone (%v)", p.f.String()))
req.Header.Add("X-Plex-Product", fmt.Sprintf("rclone (%v)", p.f.Name()))
req.Header.Add("X-Plex-Version", fs.Version)
req.Header.Add("Accept", "application/json")
if p.token != "" {
req.Header.Add("X-Plex-Token", p.token)
}
}
// authenticate will generate a token based on a username/password
func (p *plexConnector) authenticate() error {
p.mu.Lock()
defer p.mu.Unlock()
form := url.Values{}
form.Set("user[login]", p.username)
form.Add("user[password]", p.password)
req, err := http.NewRequest("POST", defPlexLoginURL, strings.NewReader(form.Encode()))
if err != nil {
return err
}
p.fillDefaultHeaders(req)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return fmt.Errorf("failed to obtain token: %v", err)
}
tokenGen, ok := get(data, "user", "authToken")
if !ok {
return fmt.Errorf("failed to obtain token: %v", data)
}
token, ok := tokenGen.(string)
if !ok {
return fmt.Errorf("failed to obtain token: %v", data)
}
p.token = token
if p.token != "" {
config.FileSet(p.f.Name(), "plex_token", p.token)
config.SaveConfig()
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
}
p.listenWebsocket()
return nil
}
// isConnected checks if this rclone is authenticated to Plex
func (p *plexConnector) isConnected() bool {
p.runningMu.Lock()
defer p.runningMu.Unlock()
return p.running
}
// isConfigured checks if this rclone is configured to use a Plex server
func (p *plexConnector) isConfigured() bool {
return p.url != nil
}
func (p *plexConnector) isPlaying(co *Object) bool {
var err error
if !p.isConnected() {
p.listenWebsocket()
}
remote := co.Remote()
if cr, yes := p.f.isWrappedByCrypt(); yes {
remote, err = cr.DecryptFileName(co.Remote())
if err != nil {
fs.Debugf("plex", "can not decrypt wrapped file: %v", err)
return false
}
}
isPlaying := false
for _, v := range p.stateCache.Items() {
if bytes.Contains(v.Object.([]byte), []byte(remote)) {
isPlaying = true
break
}
}
return isPlaying
}
// adapted from: https://stackoverflow.com/a/28878037 (credit)
func get(m interface{}, path ...interface{}) (interface{}, bool) {
for _, p := range path {
switch idx := p.(type) {
case string:
if mm, ok := m.(map[string]interface{}); ok {
if val, found := mm[idx]; found {
m = val
continue
}
}
return nil, false
case int:
if mm, ok := m.([]interface{}); ok {
if len(mm) > idx {
m = mm[idx]
continue
}
}
return nil, false
}
}
return m, true
}
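get walks an untyped json.Unmarshal result with a mixed path: string elements index maps and int elements index slices. A hedged usage sketch (paste get alongside it in a main package to run it; the JSON document is made up):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var data interface{}
	_ = json.Unmarshal([]byte(`{"user":{"authToken":"abc","tags":["x","y"]}}`), &data)

	// String path elements index maps...
	if tok, ok := get(data, "user", "authToken"); ok {
		fmt.Println(tok) // abc
	}
	// ...and int path elements index slices.
	if tag, ok := get(data, "user", "tags", 1); ok {
		fmt.Println(tag) // y
	}
}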

backend/cache/storage_memory.go vendored Normal file

@@ -0,0 +1,98 @@
// +build !plan9
package cache
import (
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
)
// Memory is a wrapper of transient storage for a go-cache store
type Memory struct {
db *cache.Cache
}
// NewMemory builds this cache storage
// defaultExpiration will set the expiry time of chunks in this storage
func NewMemory(defaultExpiration time.Duration) *Memory {
mem := &Memory{}
err := mem.Connect(defaultExpiration)
if err != nil {
fs.Errorf("cache", "can't open ram connection: %v", err)
}
return mem
}
// Connect will create a connection for the storage
func (m *Memory) Connect(defaultExpiration time.Duration) error {
m.db = cache.New(defaultExpiration, -1)
return nil
}
// HasChunk confirms the existence of a single chunk of an object
func (m *Memory) HasChunk(cachedObject *Object, offset int64) bool {
key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10)
_, found := m.db.Get(key)
return found
}
// GetChunk will retrieve a single chunk which belongs to a cached object or an error if it doesn't find it
func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10)
var data []byte
if x, found := m.db.Get(key); found {
data = x.([]byte)
return data, nil
}
return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
}
// AddChunk adds a new chunk of a cached object
func (m *Memory) AddChunk(fp string, data []byte, offset int64) error {
return m.AddChunkAhead(fp, data, offset, time.Second)
}
// AddChunkAhead adds a new chunk of a cached object
func (m *Memory) AddChunkAhead(fp string, data []byte, offset int64, t time.Duration) error {
key := fp + "-" + strconv.FormatInt(offset, 10)
m.db.Set(key, data, cache.DefaultExpiration)
return nil
}
// CleanChunksByAge will cleanup on a cron basis
func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {
m.db.DeleteExpired()
}
// CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
func (m *Memory) CleanChunksByNeed(offset int64) {
items := m.db.Items()
for key := range items {
sepIdx := strings.LastIndex(key, "-")
keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
if err != nil {
fs.Errorf("cache", "couldn't parse offset entry %v", key)
continue
}
if keyOffset < offset {
m.db.Delete(key)
}
}
}
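Chunks are keyed as "<absolute path>-<offset>", and CleanChunksByNeed recovers the offset by splitting on the last "-", since the object path may itself contain dashes. A minimal round-trip sketch with a made-up path and offset:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	key := "/remote/my-file.bin" + "-" + strconv.FormatInt(4194304, 10)

	// LastIndex, not Index: the path part may contain "-" itself.
	sepIdx := strings.LastIndex(key, "-")
	offset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(key[:sepIdx], offset) // /remote/my-file.bin 4194304
}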
// CleanChunksBySize will cleanup chunks after the total size passes a certain point
func (m *Memory) CleanChunksBySize(maxSize int64) {
// NOOP
}

backend/cache/storage_persistent.go vendored Normal file

File diff suppressed because it is too large

backend/crypt/cipher.go Normal file

File diff suppressed because it is too large

backend/crypt/cipher_test.go Normal file

File diff suppressed because it is too large

backend/crypt/crypt.go Normal file

@@ -0,0 +1,748 @@
// Package crypt provides wrappers for Fs and Object which implement encryption
package crypt
import (
"fmt"
"io"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)
// Globals
var (
// Flags
cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "crypt",
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
}, {
Name: "filename_encryption",
Help: "How to encrypt the filenames.",
Examples: []fs.OptionExample{
{
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
}, {
Value: "standard",
Help: "Encrypt the filenames see the docs for the details.",
}, {
Value: "obfuscate",
Help: "Very simple filename obfuscation.",
},
},
}, {
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Examples: []fs.OptionExample{
{
Value: "true",
Help: "Encrypt directory names.",
},
{
Value: "false",
Help: "Don't encrypt directory names, leave them intact.",
},
},
}, {
Name: "password",
Help: "Password or pass phrase for encryption.",
IsPassword: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
Optional: true,
}},
})
}
// NewCipher constructs a Cipher for the given config name
func NewCipher(name string) (Cipher, error) {
mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
if err != nil {
return nil, err
}
dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
if err != nil {
return nil, err
}
password := config.FileGet(name, "password", "")
if password == "" {
return nil, errors.New("password not set in config file")
}
password, err = obscure.Reveal(password)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password")
}
salt := config.FileGet(name, "password2", "")
if salt != "" {
salt, err = obscure.Reveal(salt)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password2")
}
}
cipher, err := newCipher(mode, password, salt, dirNameEncrypt)
if err != nil {
return nil, errors.Wrap(err, "failed to make cipher")
}
return cipher, nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string) (fs.Fs, error) {
cipher, err := NewCipher(name)
if err != nil {
return nil, err
}
remote := config.FileGet(name, "remote")
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
// Look for a file first
remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
wrappedFs, err := fs.NewFs(remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = fs.NewFs(remotePath)
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
}
f := &Fs{
Fs: wrappedFs,
name: name,
root: rpath,
cipher: cipher,
}
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false,
BucketBased: true,
CanHaveEmptyDirectories: true,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
doChangeNotify := wrappedFs.Features().ChangeNotify
if doChangeNotify != nil {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
decrypted, err := f.DecryptFileName(path)
if err != nil {
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
return
}
notifyFunc(decrypted, entryType)
}
return doChangeNotify(wrappedNotifyFunc, pollInterval)
}
}
return f, err
}
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
name string
root string
features *fs.Features // optional features
cipher Cipher
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Encrypted drive '%s:%s'", f.name, f.root)
}
// add appends an object to entries, decrypting its file name first.
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
remote := obj.Remote()
decryptedRemote, err := f.cipher.DecryptFileName(remote)
if err != nil {
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
return
}
if *cryptShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newObject(obj))
}
// addDir appends a directory to entries, decrypting its name first.
func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
return
}
if *cryptShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newDir(dir))
}
// Encrypt some directory entries. This alters entries returning it as newEntries.
func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
f.add(&newEntries, x)
case fs.Directory:
f.addDir(&newEntries, x)
default:
return nil, errors.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
}
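encryptEntries uses the in-place filter idiom: newEntries = entries[:0] reuses the backing array of entries, so undecryptable entries are dropped without allocating a second slice. The same idiom on plain strings (the data is made up):

package main

import "fmt"

func main() {
	entries := []string{"keep", "drop-me", "keep-too"}

	// Filter in place: kept shares entries' backing array.
	kept := entries[:0]
	for _, e := range entries {
		if e != "drop-me" {
			kept = append(kept, e)
		}
	}
	fmt.Println(kept) // [keep keep-too]
}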
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
if err != nil {
return nil, err
}
return f.encryptEntries(entries)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
newEntries, err := f.encryptEntries(entries)
if err != nil {
return err
}
return callback(newEntries)
})
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
return f.newObject(o), nil
}
type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
// put implements Put or PutStream
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
// Encrypt the data into wrappedIn
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
// Find a hash the destination supports to compute a hash of
// the encrypted data
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, err
}
wrappedIn = io.TeeReader(wrappedIn, hasher)
}
// Transfer the data
o, err := put(wrappedIn, f.newObjectInfo(src), options...)
if err != nil {
return nil, err
}
// Check the hashes of the encrypted data if we were comparing them
if ht != hash.None && hasher != nil {
srcHash := hasher.Sums()[ht]
var dstHash string
dstHash, err = o.Hash(ht)
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
err = o.Remove()
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
}
return f.newObject(o), nil
}
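put verifies uploads by tee-ing the encrypted stream through a hasher on its way to the remote, then comparing the result with the destination's reported hash. A minimal sketch of the io.TeeReader pattern using plain SHA-256 in place of rclone's hash.MultiHasher (the payload is made up):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := strings.NewReader("payload to upload")

	h := sha256.New()
	// Every byte read from tee is also written into h,
	// so the hash is computed during the "upload" itself.
	tee := io.TeeReader(src, h)

	uploaded, err := ioutil.ReadAll(tee) // stand-in for the real transfer
	if err != nil {
		panic(err)
	}
	fmt.Println(len(uploaded), "bytes, sha256", hex.EncodeToString(h.Sum(nil)))
}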
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(in, src, options, f.Fs.Put)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(in, src, options, f.Fs.Features().PutStream)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(dir string) error {
return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
}
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge() error {
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do()
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantCopy
}
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
return f.newObject(oResult), nil
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantMove
}
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
return f.newObject(oResult), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
do := f.Fs.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
o, err := do(wrappedIn, f.newObjectInfo(src))
if err != nil {
return nil, err
}
return f.newObject(o), nil
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp() error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp")
}
return do()
}
// About gets quota information from the Fs
func (f *Fs) About() (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
}
return do()
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// EncryptFileName returns an encrypted file name
func (f *Fs) EncryptFileName(fileName string) string {
return f.cipher.EncryptFileName(fileName)
}
// DecryptFileName returns a decrypted file name
func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
return f.cipher.DecryptFileName(encryptedFileName)
}
// ComputeHash takes the nonce from o, encrypts the contents of
// src with it, and calculates the hash given by hashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Read the nonce - opening the file is sufficient to read the nonce in
// use a limited read so we only read the header
in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
}
d, err := f.cipher.(*cipher).newDecrypter(in)
if err != nil {
_ = in.Close()
return "", errors.Wrap(err, "failed to open object to read nonce")
}
nonce := d.nonce
// fs.Debugf(o, "Read nonce % 2x", nonce)
// Check nonce isn't all zeros
isZero := true
for i := range nonce {
if nonce[i] != 0 {
isZero = false
}
}
if isZero {
fs.Errorf(o, "empty nonce read")
}
// Close d (and hence in) once we have read the nonce
err = d.Close()
if err != nil {
return "", errors.Wrap(err, "failed to close nonce read")
}
// Open the src for input
in, err = src.Open()
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
}
// Object describes a wrapped fs.Object for being read from the Fs
//
// This decrypts the remote name and decrypts the data
type Object struct {
fs.Object
f *Fs
}
func (f *Fs) newObject(o fs.Object) *Object {
return &Object{
Object: o,
f: f,
}
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
remote := o.Object.Remote()
decryptedName, err := o.f.cipher.DecryptFileName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable file name: %v", err)
return remote
}
return decryptedName
}
// Size returns the size of the file
func (o *Object) Size() int64 {
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
if err != nil {
fs.Debugf(o, "Bad size for decrypt: %v", err)
}
return size
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// UnWrap returns the wrapped Object
func (o *Object) UnWrap() fs.Object {
return o.Object
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
// pass on Options to underlying open if appropriate
openOptions = append(openOptions, option)
}
}
rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
if underlyingOffset == 0 && underlyingLimit < 0 {
// Open with no seek
return o.Object.Open(openOptions...)
}
// Open stream with a range of underlyingOffset, underlyingLimit
end := int64(-1)
if underlyingLimit >= 0 {
end = underlyingOffset + underlyingLimit - 1
if end >= o.Object.Size() {
end = -1
}
}
newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
return o.Object.Open(newOpenOptions...)
}, offset, limit)
if err != nil {
return nil, err
}
return rc, nil
}
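The closure above translates a decrypted-range request into a range on the underlying encrypted object: limit bytes from offset become an inclusive end of offset+limit-1, clamped to -1 (open-ended) once it reaches or passes EOF. The arithmetic in isolation, with made-up sizes:

package main

import "fmt"

// underlyingEnd mirrors the arithmetic above: limit bytes starting at
// offset become an inclusive end index, or -1 (open-ended) past EOF.
func underlyingEnd(offset, limit, size int64) int64 {
	if limit < 0 {
		return -1
	}
	end := offset + limit - 1
	if end >= size {
		end = -1
	}
	return end
}

func main() {
	fmt.Println(underlyingEnd(65536, 1024, 1<<20))  // 66559
	fmt.Println(underlyingEnd(65536, 1<<20, 1<<20)) // -1: runs past EOF
}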
// Update in to the object with the modTime given of the given size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return o.Object, o.Object.Update(in, src, options...)
}
_, err := o.f.put(in, src, options, update)
return err
}
// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
new := fs.NewDirCopy(dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else {
new.SetRemote(decryptedRemote)
}
return new
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
type ObjectInfo struct {
fs.ObjectInfo
f *Fs
}
func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
return &ObjectInfo{
ObjectInfo: src,
f: f,
}
}
// Fs returns read only access to the Fs that this object is part of
func (o *ObjectInfo) Fs() fs.Info {
return o.f
}
// Remote returns the remote path
func (o *ObjectInfo) Remote() string {
return o.f.cipher.EncryptFileName(o.ObjectInfo.Remote())
}
// Size returns the size of the file
func (o *ObjectInfo) Size() int64 {
size := o.ObjectInfo.Size()
if size < 0 {
return size
}
return o.f.cipher.EncryptedSize(size)
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
return "", nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
)


@@ -0,0 +1,62 @@
// Test Crypt filesystem interface
package crypt_test
import (
"os"
"path/filepath"
"testing"
"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fstest/fstests"
)
// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
},
})
}
// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
name := "TestCrypt2"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"},
},
})
}
// TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) {
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
},
SkipBadWindowsCharacters: true,
})
}

backend/drive/drive.go Normal file

File diff suppressed because it is too large


@@ -0,0 +1,112 @@
package drive
import (
"encoding/json"
"testing"
"google.golang.org/api/drive/v3"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
const exampleExportFormats = `{
"application/vnd.google-apps.document": [
"application/rtf",
"application/vnd.oasis.opendocument.text",
"text/html",
"application/pdf",
"application/epub+zip",
"application/zip",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"text/plain"
],
"application/vnd.google-apps.spreadsheet": [
"application/x-vnd.oasis.opendocument.spreadsheet",
"text/tab-separated-values",
"application/pdf",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"text/csv",
"application/zip",
"application/vnd.oasis.opendocument.spreadsheet"
],
"application/vnd.google-apps.jam": [
"application/pdf"
],
"application/vnd.google-apps.script": [
"application/vnd.google-apps.script+json"
],
"application/vnd.google-apps.presentation": [
"application/vnd.oasis.opendocument.presentation",
"application/pdf",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
"text/plain"
],
"application/vnd.google-apps.form": [
"application/zip"
],
"application/vnd.google-apps.drawing": [
"image/svg+xml",
"image/png",
"application/pdf",
"image/jpeg"
]
}`
var exportFormats map[string][]string
// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleExportFormats(t *testing.T) {
assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &exportFormats))
}
func TestInternalParseExtensions(t *testing.T) {
for _, test := range []struct {
in string
want []string
wantErr error
}{
{"doc", []string{"doc"}, nil},
{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
{"docx,svg,Docx", []string{"docx", "svg"}, nil},
{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
} {
f := new(Fs)
gotErr := f.parseExtensions(test.in)
if test.wantErr == nil {
assert.NoError(t, gotErr)
} else {
assert.EqualError(t, gotErr, test.wantErr.Error())
}
assert.Equal(t, test.want, f.extensions)
}
// Test it is appending
f := new(Fs)
assert.Nil(t, f.parseExtensions("docx,svg"))
assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
}
func TestInternalFindExportFormat(t *testing.T) {
item := new(drive.File)
item.MimeType = "application/vnd.google-apps.document"
for _, test := range []struct {
extensions []string
wantExtension string
wantMimeType string
}{
{[]string{}, "", ""},
{[]string{"pdf"}, "pdf", "application/pdf"},
{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
{[]string{"xls", "csv", "svg"}, "", ""},
} {
f := new(Fs)
f.extensions = test.extensions
gotExtension, gotMimeType := f.findExportFormat("file", exportFormats[item.MimeType])
assert.Equal(t, test.wantExtension, gotExtension)
assert.Equal(t, test.wantMimeType, gotMimeType)
}
}


@@ -0,0 +1,17 @@
// Test Drive filesystem interface
package drive_test
import (
"testing"
"github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDrive:",
NilObject: (*drive.Object)(nil),
})
}

backend/drive/upload.go Normal file

@@ -0,0 +1,249 @@
// Upload for drive
//
// Docs
// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
// Files insert: https://developers.google.com/drive/v2/reference/files/insert
// Files update: https://developers.google.com/drive/v2/reference/files/update
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
package drive
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
const (
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
statusResumeIncomplete = 308
)
// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
f *Fs
remote string
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
URI string
// Media is the object being uploaded.
Media io.Reader
// MediaType defines the media type, e.g. "image/jpeg".
MediaType string
// ContentLength is the full size of the object being uploaded.
ContentLength int64
// Return value
ret *drive.File
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
params := make(url.Values)
params.Set("alt", "json")
params.Set("uploadType", "resumable")
params.Set("fields", partialFields)
if f.isTeamDrive {
params.Set("supportsTeamDrives", "true")
}
if *driveKeepRevisionForever {
params.Set("keepRevisionForever", "true")
}
urls := "https://www.googleapis.com/upload/drive/v3/files"
method := "POST"
if fileID != "" {
params.Set("setModifiedDate", "true")
urls += "/{fileId}"
method = "PATCH"
}
urls += "?" + params.Encode()
var res *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
var body io.Reader
body, err = googleapi.WithoutDataWrapper.JSONReader(info)
if err != nil {
return false, err
}
var req *http.Request
req, err = http.NewRequest(method, urls, body)
if err != nil {
return false, err
}
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
req.Header.Set("X-Upload-Content-Type", contentType)
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
res, err = f.client.Do(req)
if err == nil {
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return shouldRetry(err)
})
if err != nil {
return nil, err
}
loc := res.Header.Get("Location")
rx := &resumableUpload{
f: f,
remote: remote,
URI: loc,
Media: in,
MediaType: contentType,
ContentLength: size,
}
return rx.Upload()
}
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body)
req.ContentLength = reqSize
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
} else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
}
req.Header.Set("Content-Type", rx.MediaType)
return req
}
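makeRequest describes each chunk with a Content-Range header of the form "bytes start-end/total" where end is inclusive, and probes progress with a zero-length "bytes */total" request. A worked example with made-up sizes:

package main

import "fmt"

func main() {
	var total int64 = 20 * 1024 * 1024 // 20 MiB upload, hypothetical
	var start, reqSize int64 = 0, 8 * 1024 * 1024

	// A chunk's Content-Range uses an inclusive end index.
	fmt.Printf("Content-Range: bytes %v-%v/%v\n", start, start+reqSize-1, total)
	// Prints: Content-Range: bytes 0-8388607/20971520

	// A zero-length request just asks the server how far it got.
	fmt.Printf("Content-Range: bytes */%v\n", total)
}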
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil, 0)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, errors.Errorf("unable to parse range %q", Range)
}
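On a 308 response the server reports progress in a Range header shaped like "0-<last byte index>"; rangeRE extracts that index. A small standalone sketch (the header value is made up):

package main

import (
	"fmt"
	"regexp"
)

var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)

func main() {
	// Hypothetical server reply after an interrupted chunk.
	header := "0-8388607"
	if m := rangeRE.FindStringSubmatch(header); len(m) == 2 {
		fmt.Println("last byte index received:", m[1]) // 8388607
	}
}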
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
_, _ = chunk.Seek(0, io.SeekStart)
req := rx.makeRequest(start, chunk, chunkSize)
res, err := rx.f.client.Do(req)
if err != nil {
return 599, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == statusResumeIncomplete {
return res.StatusCode, nil
}
err = googleapi.CheckResponse(res)
if err != nil {
return res.StatusCode, err
}
// When the entire file upload is complete, the server
// responds with an HTTP 201 Created along with any metadata
// associated with this resource. If this request had been
// updating an existing entity rather than creating a new one,
// the HTTP response code for a completed upload would have
// been 200 OK.
//
// So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionally on
// StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err
}
return res.StatusCode, nil
}
// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
var StatusCode int
var err error
buf := make([]byte, int(chunkSize))
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(start, chunk, reqSize)
again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil
}
return again, err
})
if err != nil {
return nil, err
}
start += reqSize
}
// Resume or retry uploads that fail due to connection interruptions or
// any 5xx errors, including:
//
// 500 Internal Server Error
// 502 Bad Gateway
// 503 Service Unavailable
// 504 Gateway Timeout
//
// Use an exponential backoff strategy if any 5xx server error is
// returned when resuming or retrying upload requests. These errors can
// occur if a server is getting overloaded. Exponential backoff can help
// alleviate these kinds of problems during periods of high volume of
// requests or heavy network traffic. Other kinds of requests should not
// be handled by exponential backoff but you can still retry a number of
// them. When retrying these requests, limit the number of times you
// retry them. For example your code could limit to ten retries or less
// before reporting an error.
//
// Handle 404 Not Found errors when doing resumable uploads by starting
// the entire upload over from the beginning.
if rx.ret == nil {
return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
}
return rx.ret, nil
}


@@ -0,0 +1,127 @@
// Package dbhash implements the dropbox hash as described in
//
// https://www.dropbox.com/developers/reference/content-hash
package dbhash
import (
"crypto/sha256"
"hash"
)
const (
// BlockSize of the checksum in bytes.
BlockSize = sha256.BlockSize
// Size of the checksum in bytes.
Size = sha256.Size
bytesPerBlock = 4 * 1024 * 1024
hashReturnedError = "hash function returned error"
)
type digest struct {
n int // bytes written into blockHash so far
blockHash hash.Hash
totalHash hash.Hash
sumCalled bool
writtenMore bool
}
// New returns a new hash.Hash computing the Dropbox checksum.
func New() hash.Hash {
d := &digest{}
d.Reset()
return d
}
// writeBlockHash writes the current block hash into the total hash
func (d *digest) writeBlockHash() {
blockHash := d.blockHash.Sum(nil)
_, err := d.totalHash.Write(blockHash)
if err != nil {
panic(hashReturnedError)
}
// reset counters for blockhash
d.n = 0
d.blockHash.Reset()
}
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (d *digest) Write(p []byte) (n int, err error) {
n = len(p)
for len(p) > 0 {
d.writtenMore = true
toWrite := bytesPerBlock - d.n
if toWrite > len(p) {
toWrite = len(p)
}
_, err = d.blockHash.Write(p[:toWrite])
if err != nil {
panic(hashReturnedError)
}
d.n += toWrite
p = p[toWrite:]
// Accumulate the total hash
if d.n == bytesPerBlock {
d.writeBlockHash()
}
}
return n, nil
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
//
// TODO(ncw) Sum() can only be called once for this type of hash.
// If you call Sum(), then Write() then Sum() it will result in
// a panic. Calling Write() then Sum(), then Sum() is OK.
func (d *digest) Sum(b []byte) []byte {
if d.sumCalled && d.writtenMore {
panic("digest.Sum() called more than once")
}
d.sumCalled = true
d.writtenMore = false
if d.n != 0 {
d.writeBlockHash()
}
return d.totalHash.Sum(b)
}
// Reset resets the Hash to its initial state.
func (d *digest) Reset() {
d.n = 0
d.totalHash = sha256.New()
d.blockHash = sha256.New()
d.sumCalled = false
d.writtenMore = false
}
// Size returns the number of bytes Sum will return.
func (d *digest) Size() int {
return d.totalHash.Size()
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int {
return d.totalHash.BlockSize()
}
// Sum returns the Dropbox checksum of the data.
func Sum(data []byte) [Size]byte {
var d digest
d.Reset()
_, _ = d.Write(data)
var out [Size]byte
d.Sum(out[:0])
return out
}
// must implement this interface
var _ hash.Hash = (*digest)(nil)
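Because digest implements hash.Hash, computing a Dropbox content hash is just an io.Copy; the 4 MiB block splitting happens inside Write. A hedged usage sketch (the file name is made up):

package main

import (
	"encoding/hex"
	"fmt"
	"io"
	"os"

	"github.com/ncw/rclone/backend/dropbox/dbhash"
)

func main() {
	f, err := os.Open("large-file.bin") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	d := dbhash.New()
	if _, err := io.Copy(d, f); err != nil { // streams in any chunk size
		panic(err)
	}
	fmt.Println(hex.EncodeToString(d.Sum(nil)))
}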


@@ -0,0 +1,88 @@
package dbhash_test
import (
"encoding/hex"
"fmt"
"testing"
"github.com/ncw/rclone/backend/dropbox/dbhash"
"github.com/stretchr/testify/assert"
)
func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk)
for i := 0; i < chunk; i++ {
data[i] = 'A'
}
for _, test := range []struct {
n int
want string
}{
{0, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
{1, "1cd6ef71e6e0ff46ad2609d403dc3fee244417089aa4461245a4e4fe23a55e42"},
{2, "01e0655fb754d10418a73760f57515f4903b298e6d67dda6bf0987fa79c22c88"},
{4096, "8620913d33852befe09f16fff8fd75f77a83160d29f76f07e0276e9690903035"},
{4194303, "647c8627d70f7a7d13ce96b1e7710a771a55d41a62c3da490d92e56044d311fa"},
{4194304, "d4d63bac5b866c71620185392a8a6218ac1092454a2d16f820363b69852befa3"},
{4194305, "8f553da8d00d0bf509d8470e242888be33019c20c0544811f5b2b89e98360b92"},
{8388607, "83b30cf4fb5195b04a937727ae379cf3d06673bf8f77947f6a92858536e8369c"},
{8388608, "e08b3ba1f538804075c5f939accdeaa9efc7b5c01865c94a41e78ca6550a88e7"},
{8388609, "02c8a4aefc2bfc9036f89a7098001865885938ca580e5c9e5db672385edd303c"},
} {
d := dbhash.New()
var toWrite int
for toWrite = test.n; toWrite >= chunk; toWrite -= chunk {
n, err := d.Write(data)
assert.Nil(t, err)
assert.Equal(t, chunk, n)
}
n, err := d.Write(data[:toWrite])
assert.Nil(t, err)
assert.Equal(t, toWrite, n)
got := hex.EncodeToString(d.Sum(nil))
assert.Equal(t, test.want, got, fmt.Sprintf("when testing length %d", n))
}
}
func TestHashChunk16M(t *testing.T) { testChunk(t, 16*1024*1024) }
func TestHashChunk8M(t *testing.T) { testChunk(t, 8*1024*1024) }
func TestHashChunk4M(t *testing.T) { testChunk(t, 4*1024*1024) }
func TestHashChunk2M(t *testing.T) { testChunk(t, 2*1024*1024) }
func TestHashChunk1M(t *testing.T) { testChunk(t, 1*1024*1024) }
func TestHashChunk64k(t *testing.T) { testChunk(t, 64*1024) }
func TestHashChunk32k(t *testing.T) { testChunk(t, 32*1024) }
func TestHashChunk2048(t *testing.T) { testChunk(t, 2048) }
func TestHashChunk2047(t *testing.T) { testChunk(t, 2047) }
func TestSumCalledTwice(t *testing.T) {
d := dbhash.New()
assert.NotPanics(t, func() { d.Sum(nil) })
d.Reset()
assert.NotPanics(t, func() { d.Sum(nil) })
assert.NotPanics(t, func() { d.Sum(nil) })
_, _ = d.Write([]byte{1})
assert.Panics(t, func() { d.Sum(nil) })
}
func TestSize(t *testing.T) {
d := dbhash.New()
assert.Equal(t, 32, d.Size())
}
func TestBlockSize(t *testing.T) {
d := dbhash.New()
assert.Equal(t, 64, d.BlockSize())
}
func TestSum(t *testing.T) {
assert.Equal(t,
[32]byte{
0x1c, 0xd6, 0xef, 0x71, 0xe6, 0xe0, 0xff, 0x46,
0xad, 0x26, 0x09, 0xd4, 0x03, 0xdc, 0x3f, 0xee,
0x24, 0x44, 0x17, 0x08, 0x9a, 0xa4, 0x46, 0x12,
0x45, 0xa4, 0xe4, 0xfe, 0x23, 0xa5, 0x5e, 0x42,
},
dbhash.Sum([]byte{'A'}),
)
}

backend/dropbox/dropbox.go Normal file

File diff suppressed because it is too large


@@ -0,0 +1,17 @@
// Test Dropbox filesystem interface
package dropbox_test
import (
"testing"
"github.com/ncw/rclone/backend/dropbox"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDropbox:",
NilObject: (*dropbox.Object)(nil),
})
}

backend/ftp/ftp.go Normal file

@@ -0,0 +1,773 @@
// Package ftp interfaces with FTP servers
package ftp
import (
"io"
"net/textproto"
"net/url"
"os"
"path"
"strings"
"sync"
"time"
"github.com/jlaffaye/ftp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "ftp",
Description: "FTP Connection",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "host",
Help: "FTP host to connect to",
Optional: false,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
Optional: true,
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21) ",
Optional: true,
}, {
Name: "pass",
Help: "FTP password",
IsPassword: true,
Optional: false,
},
},
})
}
// Fs represents a remote FTP server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
features *fs.Features // optional features
url string
user string
pass string
dialAddr string
poolMu sync.Mutex
pool []*ftp.ServerConn
}
// Object describes an FTP file
type Object struct {
fs *Fs
remote string
info *FileInfo
}
// FileInfo is the metadata known about an FTP file
type FileInfo struct {
Name string
Size uint64
ModTime time.Time
IsDir bool
}
// ------------------------------------------------------------
// Name of this fs
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return f.url
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
if err != nil {
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Dial")
}
err = c.Login(f.user, f.pass)
if err != nil {
_ = c.Quit()
fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Login")
}
return c, nil
}
// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
f.poolMu.Lock()
if len(f.pool) > 0 {
c = f.pool[0]
f.pool = f.pool[1:]
}
f.poolMu.Unlock()
if c != nil {
return c, nil
}
return f.ftpConnection()
}
// Return an FTP connection to the pool
//
// It nils the pointed to connection out so it can't be reused
//
// if err is not nil then it checks the connection is alive using a
// NOOP request
func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
c := *pc
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
_, isRegularError := errors.Cause(err).(*textproto.Error)
if !isRegularError {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
_ = c.Quit()
return
}
}
}
f.poolMu.Lock()
f.pool = append(f.pool, c)
f.poolMu.Unlock()
}
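Every server operation in this backend brackets itself with getFtpConnection/putFtpConnection; passing the operation's error into putFtpConnection lets the pool NOOP-check a possibly broken connection before reusing it. A sketch of the calling convention, assuming it sits in the same package as the helpers above (listNames is a made-up example method, not part of the diff):

// listNames is a hypothetical illustration of the get/put bracket
// used by the real methods below.
func (f *Fs) listNames(dir string) ([]string, error) {
	c, err := f.getFtpConnection()
	if err != nil {
		return nil, err
	}
	entries, err := c.List(dir)
	// Always return the connection; a non-nil err triggers a NOOP
	// health check inside putFtpConnection before pooling it.
	f.putFtpConnection(&c, err)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(entries))
	for _, e := range entries {
		names = append(names, e.Name)
	}
	return names, nil
}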
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (ff fs.Fs, err error) {
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// FIXME Convert the old scheme used for the first beta - remove after release
if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
fs.Infof(name, "Converting old configuration")
u, err := url.Parse(ftpURL)
if err != nil {
return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
}
parts := strings.Split(u.Host, ":")
config.FileSet(name, "host", parts[0])
if len(parts) > 1 {
config.FileSet(name, "port", parts[1])
}
config.FileSet(name, "host", u.Host)
config.FileSet(name, "user", config.FileGet(name, "username"))
config.FileSet(name, "pass", config.FileGet(name, "password"))
config.FileDeleteKey(name, "username")
config.FileDeleteKey(name, "password")
config.FileDeleteKey(name, "url")
config.SaveConfig()
if u.Path != "" && u.Path != "/" {
fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
}
}
host := config.FileGet(name, "host")
user := config.FileGet(name, "user")
pass := config.FileGet(name, "pass")
port := config.FileGet(name, "port")
pass, err = obscure.Reveal(pass)
if err != nil {
return nil, errors.Wrap(err, "NewFS decrypt password")
}
if user == "" {
user = os.Getenv("USER")
}
if port == "" {
port = "21"
}
dialAddr := host + ":" + port
u := "ftp://" + path.Join(dialAddr+"/", root)
f := &Fs{
name: name,
root: root,
url: u,
user: user,
pass: pass,
dialAddr: dialAddr,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(f)
// Make a connection and pool it to return errors early
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "NewFs")
}
f.putFtpConnection(&c, nil)
if root != "" {
// Check to see if the root is actually an existing file
remote := path.Base(root)
f.root = path.Dir(root)
if f.root == "." {
f.root = ""
}
_, err := f.NewObject(remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
// File doesn't exist so return old f
f.root = root
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, err
}
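// Worked example (a sketch, not from the original source): NewFs("myftp",
// "dir/file.txt") stats "file.txt" inside "dir"; if it exists, the returned
// Fs is rooted at "dir" and the error is fs.ErrorIsFile, which tells the
// caller that the remote refers to a single file rather than a directory.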
// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorObjectNotFound
}
}
return err
}
// translateErrorDir turns FTP errors into rclone errors if possible for a directory
func translateErrorDir(err error) error {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorDirNotFound
}
}
return err
}
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote)
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "findItem")
}
files, err := c.List(dir)
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for _, file := range files {
if file.Name == base {
return file, nil
}
}
return nil, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(remote)
if err != nil {
return nil, err
}
if entry != nil && entry.Type != ftp.EntryTypeFolder {
o := &Object{
fs: f,
remote: remote,
}
info := &FileInfo{
Name: remote,
Size: entry.Size,
ModTime: entry.Time,
}
o.info = info
return o, nil
}
return nil, fs.ErrorObjectNotFound
}
// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(remote string) (exists bool, err error) {
entry, err := f.findItem(remote)
if err != nil {
return false, errors.Wrap(err, "dirExists")
}
if entry != nil && entry.Type == ftp.EntryTypeFolder {
return true, nil
}
return false, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "list")
}
files, err := c.List(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorDir(err)
}
// Annoyingly FTP returns success for a directory which
// doesn't exist, so check it really doesn't exist if no
// entries found.
if len(files) == 0 {
exists, err := f.dirExists(dir)
if err != nil {
return nil, errors.Wrap(err, "list")
}
if !exists {
return nil, fs.ErrorDirNotFound
}
}
for i := range files {
object := files[i]
newremote := path.Join(dir, object.Name)
switch object.Type {
case ftp.EntryTypeFolder:
if object.Name == "." || object.Name == ".." {
continue
}
d := fs.NewDir(newremote, object.Time)
entries = append(entries, d)
default:
o := &Object{
fs: f,
remote: newremote,
}
info := &FileInfo{
Name: newremote,
Size: object.Size,
ModTime: object.Time,
}
o.info = info
entries = append(entries, o)
}
}
return entries, nil
}
// Hashes are not supported
func (f *Fs) Hashes() hash.Set {
return 0
}
// Precision shows that modified times are not supported
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(src.Remote())
if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed")
}
o := &Object{
fs: f,
remote: src.Remote(),
}
err = o.Update(in, src, options...)
return o, err
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "getInfo")
}
files, err := c.List(dir)
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for i := range files {
if files[i].Name == base {
info := &FileInfo{
Name: remote,
Size: files[i].Size,
ModTime: files[i].Time,
IsDir: files[i].Type == ftp.EntryTypeFolder,
}
return info, nil
}
}
return nil, fs.ErrorObjectNotFound
}
// mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(abspath string) error {
if abspath == "." || abspath == "/" {
return nil
}
fi, err := f.getInfo(abspath)
if err == nil {
if fi.IsDir {
return nil
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
return errors.Wrapf(err, "mkdir %q failed", abspath)
}
parent := path.Dir(abspath)
err = f.mkdir(parent)
if err != nil {
return err
}
c, connErr := f.getFtpConnection()
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
}
err = c.MakeDir(abspath)
f.putFtpConnection(&c, err)
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
case 521: // dir already exists: error number according to RFC 959: issue #2363
err = nil
}
}
return err
}
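// Worked example (sketch): f.mkdir("a/b/c") recurses through path.Dir to
// create "a" and then "a/b" before issuing MakeDir("a/b/c"); 550
// (StatusFileUnavailable) and 521 replies are swallowed so that an already
// existing directory is not reported as an error.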
// mkParentDir makes the parent of remote if necessary and any
// directories above that
func (f *Fs) mkParentDir(remote string) error {
parent := path.Dir(remote)
return f.mkdir(path.Join(f.root, parent))
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(dir string) (err error) {
// defer fs.Trace(dir, "")("err=%v", &err)
root := path.Join(f.root, dir)
return f.mkdir(root)
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
c, err := f.getFtpConnection()
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
}
err = c.RemoveDir(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
return translateErrorDir(err)
}
// Move renames a remote file object
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
err := f.mkParentDir(remote)
if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed")
}
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "Move")
}
err = c.Rename(
path.Join(srcObj.fs.root, srcObj.remote),
path.Join(f.root, remote),
)
f.putFtpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "Move Rename failed")
}
dstObj, err := f.NewObject(remote)
if err != nil {
return nil, errors.Wrap(err, "Move NewObject failed")
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
// Check if destination exists
fi, err := f.getInfo(dstPath)
if err == nil {
if fi.IsDir {
return fs.ErrorDirExists
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
return errors.Wrapf(err, "DirMove getInfo failed")
}
// Make sure the parent directory exists
err = f.mkdir(path.Dir(dstPath))
if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed")
}
// Do the move
c, err := f.getFtpConnection()
if err != nil {
return errors.Wrap(err, "DirMove")
}
err = c.Rename(
srcPath,
dstPath,
)
f.putFtpConnection(&c, err)
if err != nil {
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
}
return nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// String version of o
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the hash of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return int64(o.info.Size)
}
// ModTime returns the modification time of the object
func (o *Object) ModTime() time.Time {
return o.info.ModTime
}
// SetModTime sets the modification time of the object (FTP has no way to set it, so this is a no-op)
func (o *Object) SetModTime(modTime time.Time) error {
return nil
}
// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
return true
}
// ftpReadCloser implements io.ReadCloser for FTP objects.
type ftpReadCloser struct {
rc io.ReadCloser
c *ftp.ServerConn
f *Fs
err error // errors found during read
}
// Read bytes into p
func (f *ftpReadCloser) Read(p []byte) (n int, err error) {
n, err = f.rc.Read(p)
if err != nil && err != io.EOF {
f.err = err // store any errors for Close to examine
}
return
}
// Close the FTP reader and return the connection to the pool
func (f *ftpReadCloser) Close() error {
err := f.rc.Close()
// if errors while reading or closing, dump the connection
if err != nil || f.err != nil {
_ = f.c.Quit()
} else {
f.f.putFtpConnection(&f.c, nil)
}
// mask the error if it was caused by a premature close
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
err = nil
}
}
return err
}
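// Lifecycle sketch (inferred from Open below): the ftpReadCloser owns its
// connection for the whole transfer; only a clean Read plus Close returns
// the connection to the pool, otherwise it is Quit so that a connection
// with a half-finished data transfer can never be reused.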
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
path := path.Join(o.fs.root, o.remote)
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
c, err := o.fs.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "open")
}
fd, err := c.RetrFrom(path, uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open")
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
}
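// Range example (a sketch): Open(&fs.RangeOption{Start: 1, End: 5}) on a
// 9 byte object decodes to offset 1 and limit 5, so RetrFrom starts the
// transfer at byte 1 and the LimitedReadCloser stops after 5 bytes.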
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// remove the file if upload failed
remove := func() {
removeErr := o.Remove()
if removeErr != nil {
fs.Debugf(o, "Failed to remove: %v", removeErr)
} else {
fs.Debugf(o, "Removed after failed upload: %v", err)
}
}
c, err := o.fs.getFtpConnection()
if err != nil {
return errors.Wrap(err, "Update")
}
err = c.Stor(path, in)
if err != nil {
_ = c.Quit()
remove()
return errors.Wrap(err, "update stor")
}
o.fs.putFtpConnection(&c, nil)
o.info, err = o.fs.getInfo(path)
if err != nil {
return errors.Wrap(err, "update getinfo")
}
return nil
}
// Remove an object
func (o *Object) Remove() (err error) {
// defer fs.Trace(o, "")("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// Check if it's a directory or a file
info, err := o.fs.getInfo(path)
if err != nil {
return err
}
if info.IsDir {
err = o.fs.Rmdir(o.remote)
} else {
c, connErr := o.fs.getFtpConnection()
if connErr != nil {
return errors.Wrap(connErr, "Remove")
}
err = c.Delete(path)
o.fs.putFtpConnection(&c, err)
}
return err
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Object = &Object{}
)

backend/ftp/ftp_test.go Normal file
@@ -0,0 +1,17 @@
// Test FTP filesystem interface
package ftp_test
import (
"testing"
"github.com/ncw/rclone/backend/ftp"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTP:",
NilObject: (*ftp.Object)(nil),
})
}

backend/googlecloudstorage/googlecloudstorage.go Normal file

@@ -0,0 +1,984 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage
package googlecloudstorage
/*
Notes
Can't set Updated but can set Metadata on object creation
Patch needs full_control not just read_write
FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error
- https://code.google.com/p/google-api-go-client/issues/detail?id=64
*/
import (
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
storage "google.golang.org/api/storage/v1"
)
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 1000 // chunk size to read directory listings
minSleep = 10 * time.Millisecond
)
var (
gcsLocation = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-northeast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
// Description of how to auth for this app
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageFullControlScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "google cloud storage",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(name string) {
if config.FileGet(name, "service_account_file") != "" {
return
}
err := oauthutil.Config("google cloud storage", name, storageConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id - leave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret - leave blank normally.",
}, {
Name: "project_number",
Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path - needed only if you want to use SA instead of interactive login.",
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
}, {
Value: "bucketOwnerFullControl",
Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
}, {
Value: "bucketOwnerRead",
Help: "Object owner gets OWNER access, and project team owners get READER access.",
}, {
Value: "private",
Help: "Object owner gets OWNER access [default if left blank].",
}, {
Value: "projectPrivate",
Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Object owner gets OWNER access, and all Users get READER access.",
}},
}, {
Name: "bucket_acl",
Help: "Access Control List for new buckets.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
}, {
Value: "private",
Help: "Project team owners get OWNER access [default if left blank].",
}, {
Value: "projectPrivate",
Help: "Project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Project team owners get OWNER access, and all Users get READER access.",
}, {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
}},
}, {
Name: "location",
Help: "Location for the newly created buckets.",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for default location (US).",
}, {
Value: "asia",
Help: "Multi-regional location for Asia.",
}, {
Value: "eu",
Help: "Multi-regional location for Europe.",
}, {
Value: "us",
Help: "Multi-regional location for United States.",
}, {
Value: "asia-east1",
Help: "Taiwan.",
}, {
Value: "asia-northeast1",
Help: "Tokyo.",
}, {
Value: "asia-southeast1",
Help: "Singapore.",
}, {
Value: "australia-southeast1",
Help: "Sydney.",
}, {
Value: "europe-west1",
Help: "Belgium.",
}, {
Value: "europe-west2",
Help: "London.",
}, {
Value: "us-central1",
Help: "Iowa.",
}, {
Value: "us-east1",
Help: "South Carolina.",
}, {
Value: "us-east4",
Help: "Northern Virginia.",
}, {
Value: "us-west1",
Help: "Oregon.",
}},
}, {
Name: "storage_class",
Help: "The storage class to use when storing objects in Google Cloud Storage.",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "MULTI_REGIONAL",
Help: "Multi-regional storage class",
}, {
Value: "REGIONAL",
Help: "Regional storage class",
}, {
Value: "NEARLINE",
Help: "Nearline storage class",
}, {
Value: "COLDLINE",
Help: "Coldline storage class",
}, {
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}},
})
}
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
projectNumber string // used for finding buckets
objectACL string // used when creating new objects
bucketACL string // used when creating new buckets
location string // location of new buckets
storageClass string // storage class of new buckets
pacer *pacer.Pacer // To pace the API calls
}
// Object describes a storage object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
url string // download path
md5sum string // The MD5Sum of the object
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
mimeType string
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("Storage bucket %s", f.bucket)
}
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// shouldRetry determines whether a given err deserves to be retried
func shouldRetry(err error) (again bool, errOut error) {
again = false
if err != nil {
if fserrors.ShouldRetry(err) {
again = true
} else {
switch gerr := err.(type) {
case *googleapi.Error:
if gerr.Code >= 500 && gerr.Code < 600 {
// All 5xx errors should be retried
again = true
} else if len(gerr.Errors) > 0 {
reason := gerr.Errors[0].Reason
if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
again = true
}
}
}
}
}
return again, err
}
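// Calling convention sketch (as used throughout this file): API calls are
// wrapped in pacer.Call, which retries with backoff for as long as
// shouldRetry reports the error as retryable:
//
//	err = f.pacer.Call(func() (bool, error) {
//		_, err = f.svc.Objects.Get(bucket, object).Do()
//		return shouldRetry(err)
//	})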
// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
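// For example (sketch): parsePath("mybucket/path/to/dir/") returns bucket
// "mybucket" and directory "path/to/dir" - the regexp splits at the first
// "/" and any surrounding slashes are trimmed from the directory.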
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
}
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
var oAuthClient *http.Client
var err error
// try loading service account credentials from env variable, then from a file
serviceAccountCreds := []byte(config.FileGet(name, "service_account_credentials"))
serviceAccountPath := config.FileGet(name, "service_account_file")
if len(serviceAccountCreds) == 0 && serviceAccountPath != "" {
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(serviceAccountPath))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
}
serviceAccountCreds = loadedCreds
}
if len(serviceAccountCreds) > 0 {
oAuthClient, err = getServiceAccountClient(serviceAccountCreds)
if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
}
} else {
oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
}
}
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
bucket: bucket,
root: directory,
projectNumber: config.FileGet(name, "project_number"),
objectACL: config.FileGet(name, "object_acl"),
bucketACL: config.FileGet(name, "bucket_acl"),
location: config.FileGet(name, "location"),
storageClass: config.FileGet(name, "storage_class"),
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if f.objectACL == "" {
f.objectACL = "private"
}
if f.bucketACL == "" {
f.bucketACL = "private"
}
if *gcsLocation != "" {
f.location = *gcsLocation
}
if *gcsStorageClass != "" {
f.storageClass = *gcsStorageClass
}
// Create a new authorized Storage client.
f.client = oAuthClient
f.svc, err = storage.New(f.client)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(bucket, directory).Do()
return shouldRetry(err)
})
if err == nil {
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
}
return f, nil
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
o.setMetaData(info)
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *storage.Object, isDirectory bool) error
// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
root := f.root
rootLength := len(root)
if dir != "" {
root += dir + "/"
}
list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
if !recurse {
list = list.Delimiter("/")
}
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
objects, err = list.Do()
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
return err
}
if !recurse {
var object storage.Object
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
continue
}
err = fn(prefix[rootLength:len(prefix)-1], &object, true)
if err != nil {
return err
}
}
}
for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, root) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
// is this a directory marker?
if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
if recurse && remote != "" {
// add a directory in if --fast-list since there will be no prefixes
err = fn(remote[:len(remote)-1], object, true)
if err != nil {
return err
}
}
continue // skip directory marker
}
err = fn(remote, object, false)
if err != nil {
return err
}
}
if objects.NextPageToken == "" {
break
}
list.PageToken(objects.NextPageToken)
}
return nil
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
return d, nil
}
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketOKMu.Unlock()
}
}
// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return entries, err
}
// listBuckets lists the buckets
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
if f.projectNumber == "" {
return nil, errors.New("can't list buckets without project number")
}
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
for _, bucket := range buckets.Items {
d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d)
}
if buckets.NextPageToken == "" {
break
}
listBuckets.PageToken(buckets.NextPageToken)
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(dir)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
f.bucketOK = true
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
return errors.Wrap(err, "failed to get bucket")
}
if f.projectNumber == "" {
return errors.New("can't make bucket without project number")
}
bucket := storage.Bucket{
Name: f.bucket,
Location: f.location,
StorageClass: f.storageClass,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = true
}
return err
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
err = f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(f.bucket).Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = false
}
return err
}
// Precision returns the precision
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir("")
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
srcBucket := srcObj.fs.bucket
srcObject := srcObj.fs.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
// Set the metadata for the new object while we have it
dstObj.setMetaData(newObject)
return dstObj, nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the MD5 sum of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.bytes
}
// setMetaData sets the fs data from a storage.Object
func (o *Object) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)
o.mimeType = info.ContentType
// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
if err != nil {
fs.Logf(o, "Bad MD5 decode: %v", err)
} else {
o.md5sum = hex.EncodeToString(md5sumData)
}
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(timeFormatIn, mtimeString)
if err == nil {
o.modTime = modTime
return
}
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormatIn, info.Updated)
if err != nil {
fs.Logf(o, "Bad time decode: %v", err)
} else {
o.modTime = modTime
}
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
var object *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
o.setMetaData(object)
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime from the metadata; if that isn't
// present it falls back to the Updated time returned by the API
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
// fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
return o.modTime
}
// metadataFromModTime returns metadata for an object with the modTime given
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormatOut)
return metadata
}
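// For example (sketch): a modTime of 2018-07-01 12:00:00 UTC is stored as
//
//	map[string]string{"mtime": "2018-07-01T12:00:00.000000000Z"}
//
// so nanosecond precision survives the round trip through object metadata.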
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) (err error) {
// This only adds metadata so will preserve other metadata
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
var newObject *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
return shouldRetry(err)
})
if err != nil {
return err
}
o.setMetaData(newObject)
return nil
}
// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
req, err := http.NewRequest("GET", o.url, nil)
if err != nil {
return nil, err
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
if err == nil {
err = googleapi.CheckResponse(res)
if err != nil {
_ = res.Body.Close() // ignore error
}
}
return shouldRetry(err)
})
if err != nil {
return nil, err
}
_, isRanging := req.Header["Range"]
if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
_ = res.Body.Close() // ignore error
return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
err := o.fs.Mkdir("")
if err != nil {
return err
}
modTime := src.ModTime()
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
ContentType: fs.MimeType(src),
Updated: modTime.Format(timeFormatOut), // Doesn't get set
Metadata: metadataFromModTime(modTime),
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do()
return shouldRetry(err)
})
if err != nil {
return err
}
// Set the metadata for the new object while we have it
o.setMetaData(newObject)
return nil
}
// Remove an object
func (o *Object) Remove() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
return err
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
return o.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

backend/googlecloudstorage/googlecloudstorage_test.go Normal file

@@ -0,0 +1,17 @@
// Test GoogleCloudStorage filesystem interface
package googlecloudstorage_test
import (
"testing"
"github.com/ncw/rclone/backend/googlecloudstorage"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestGoogleCloudStorage:",
NilObject: (*googlecloudstorage.Object)(nil),
})
}

backend/http/http.go Normal file

@@ -0,0 +1,489 @@
// Package http provides a filesystem interface using golang.org/net/http
//
// It treats HTML pages served from the endpoint as directory
// listings, and includes any links found as files.
package http
import (
"io"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"golang.org/x/net/html"
)
var (
errorReadOnly = errors.New("http remotes are read only")
timeUnset = time.Unix(0, 0)
)
func init() {
fsi := &fs.RegInfo{
Name: "http",
Description: "http Connection",
NewFs: NewFs,
Options: []fs.Option{{
Name: "url",
Help: "URL of http host to connect to",
Optional: false,
Examples: []fs.OptionExample{{
Value: "https://example.com",
Help: "Connect to example.com",
}},
}},
}
fs.Register(fsi)
}
// Fs stores the interface to the remote HTTP files
type Fs struct {
name string
root string
features *fs.Features // optional features
endpoint *url.URL
endpointURL string // endpoint as a string
httpClient *http.Client
}
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
contentType string
}
// statusError returns an error if the res contained an error
func statusError(res *http.Response, err error) error {
if err != nil {
return err
}
if res.StatusCode < 200 || res.StatusCode > 299 {
_ = res.Body.Close()
return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
}
return nil
}
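// Typical use (sketch): wrap an HTTP call so that transport errors and
// non-2xx statuses surface as a single error:
//
//	res, err := client.Head(url)
//	if err := statusError(res, err); err != nil {
//		return err
//	}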
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string) (fs.Fs, error) {
endpoint := config.FileGet(name, "url")
if !strings.HasSuffix(endpoint, "/") {
endpoint += "/"
}
// Parse the endpoint and stick the root onto it
base, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
u, err := rest.URLJoin(base, rest.URLPathEscape(root))
if err != nil {
return nil, err
}
client := fshttp.NewClient(fs.Config)
var isFile = false
if !strings.HasSuffix(u.String(), "/") {
// Make a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
// check to see if points to a file
res, err := noRedir.Head(u.String())
err = statusError(res, err)
if err == nil {
isFile = true
}
}
newRoot := u.String()
if isFile {
// Point to the parent if this is a file
newRoot, _ = path.Split(u.String())
} else {
if !strings.HasSuffix(newRoot, "/") {
newRoot += "/"
}
}
u, err = url.Parse(newRoot)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
httpClient: client,
endpoint: u,
endpointURL: u.String(),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(f)
if isFile {
return f, fs.ErrorIsFile
}
if !strings.HasSuffix(f.endpointURL, "/") {
return nil, errors.New("internal error: url doesn't end with /")
}
return f, nil
}
// Name returns the configured name of the file system
func (f *Fs) Name() string {
return f.name
}
// Root returns the root for the filesystem
func (f *Fs) Root() string {
return f.root
}
// String returns the URL for the filesystem
func (f *Fs) String() string {
return f.endpointURL
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
return time.Second
}
// NewObject creates a new remote http file object
func (f *Fs) NewObject(remote string) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
err := o.stat()
if err != nil {
return nil, errors.Wrap(err, "Stat failed")
}
return o, nil
}
// url joins the remote onto the base URL
func (f *Fs) url(remote string) string {
return f.endpointURL + rest.URLPathEscape(remote)
}
// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
n, e := strconv.ParseInt(s, 10, 64)
if e != nil {
return def
}
return n
}
// Errors returned by parseName
var (
errURLJoinFailed = errors.New("URLJoin failed")
errFoundQuestionMark = errors.New("found ? in URL")
errHostMismatch = errors.New("host mismatch")
errSchemeMismatch = errors.New("scheme mismatch")
errNotUnderRoot = errors.New("not under root")
errNameIsEmpty = errors.New("name is empty")
errNameContainsSlash = errors.New("name contains /")
)
// parseName turns a name as found in the page into a remote path or returns an error
func parseName(base *url.URL, name string) (string, error) {
// make URL absolute
u, err := rest.URLJoin(base, name)
if err != nil {
return "", errURLJoinFailed
}
// check it doesn't have URL parameters
uStr := u.String()
if strings.Index(uStr, "?") >= 0 {
return "", errFoundQuestionMark
}
// check that this is going back to the same host and scheme
if base.Host != u.Host {
return "", errHostMismatch
}
if base.Scheme != u.Scheme {
return "", errSchemeMismatch
}
// check has path prefix
if !strings.HasPrefix(u.Path, base.Path) {
return "", errNotUnderRoot
}
// calculate the name relative to the base
name = u.Path[len(base.Path):]
// mustn't be empty
if name == "" {
return "", errNameIsEmpty
}
// mustn't contain a /, except as a trailing slash - we are looking for a single level directory or file
slash := strings.Index(name, "/")
if slash >= 0 && slash != len(name)-1 {
return "", errNameContainsSlash
}
return name, nil
}
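// Worked examples (drawn from the tests later in this change): with base
// "http://example.com/dir/", both "potato" and "/dir/potato" parse to
// "potato", "subdir/potato" fails with errNameContainsSlash, and a link
// such as "potato?download=true" fails with errFoundQuestionMark.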
// parse turns HTML for a directory into names
// base should be the base URL to resolve any relative names from
func parse(base *url.URL, in io.Reader) (names []string, err error) {
doc, err := html.Parse(in)
if err != nil {
return nil, err
}
var walk func(*html.Node)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
name, err := parseName(base, a.Val)
if err == nil {
names = append(names, name)
}
break
}
}
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
walk(c)
}
}
walk(doc)
return names, nil
}
// Read the directory passed in
func (f *Fs) readDir(dir string) (names []string, err error) {
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
}
if !strings.HasSuffix(URL, "/") {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
res, err := f.httpClient.Get(URL)
if err == nil && res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
}
defer fs.CheckClose(res.Body, &err)
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
switch contentType {
case "text/html":
names, err = parse(u, res.Body)
if err != nil {
return nil, errors.Wrap(err, "readDir")
}
default:
return nil, errors.Errorf("Can't parse content type %q", contentType)
}
return names, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if !strings.HasSuffix(dir, "/") && dir != "" {
dir += "/"
}
names, err := f.readDir(dir)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
}
for _, name := range names {
isDir := name[len(name)-1] == '/'
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
} else {
file := &Object{
fs: f,
remote: remote,
}
if err = file.stat(); err != nil {
fs.Debugf(remote, "skipping because of error: %v", err)
continue
}
entries = append(entries, file)
}
}
return entries, nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
return o.fs
}
// String returns the URL to the remote HTTP file
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
return o.remote
}
// Hash returns "" since HTTP doesn't support remote calculation of hashes
func (o *Object) Hash(r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the remote http file
func (o *Object) ModTime() time.Time {
return o.modTime
}
// url returns the native url of the object
func (o *Object) url() string {
return o.fs.url(o.remote)
}
// stat updates the info field in the Object
func (o *Object) stat() error {
url := o.url()
res, err := o.fs.httpClient.Head(url)
err = statusError(res, err)
if err != nil {
return errors.Wrap(err, "failed to stat")
}
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
t = timeUnset
}
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
return nil
}
// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(modTime time.Time) error {
return errorReadOnly
}
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
func (o *Object) Storable() bool {
return true
}
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url()
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {
req.Header.Add(k, v)
}
// Do the request
res, err := o.fs.httpClient.Do(req)
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
return res.Body, nil
}
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(dir string) error {
return errorReadOnly
}
// Remove a remote http file object
func (o *Object) Remove() error {
return errorReadOnly
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(dir string) error {
return errorReadOnly
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errorReadOnly
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
return o.contentType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

backend/http/http_internal_test.go Normal file

@@ -0,0 +1,321 @@
// +build go1.8
package http
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"sort"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/rest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
)
// prepareServer starts the test server and returns a function to tidy it up afterwards
func prepareServer(t *testing.T) func() {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// Make the test server
ts := httptest.NewServer(fileServer)
// Configure the remote
config.LoadConfig()
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
config.FileSet(remoteName, "type", "http")
config.FileSet(remoteName, "url", ts.URL)
// return a function to tidy up
return ts.Close
}
// prepare the test server and Fs and return a function to tidy them up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
tidy := prepareServer(t)
// Instantiate it
f, err := NewFs(remoteName, "")
require.NoError(t, err)
return f, tidy
}
func testListRoot(t *testing.T, f fs.Fs) {
entries, err := f.List("")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, 4, len(entries))
e := entries[0]
assert.Equal(t, "four", e.Remote())
assert.Equal(t, int64(-1), e.Size())
_, ok := e.(fs.Directory)
assert.True(t, ok)
e = entries[1]
assert.Equal(t, "one%.txt", e.Remote())
assert.Equal(t, int64(6), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
e = entries[2]
assert.Equal(t, "three", e.Remote())
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
e = entries[3]
assert.Equal(t, "two.html", e.Remote())
assert.Equal(t, int64(7), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
func TestListRoot(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
testListRoot(t, f)
}
func TestListSubDir(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
entries, err := f.List("three")
require.NoError(t, err)
sort.Sort(entries)
assert.Equal(t, 1, len(entries))
e := entries[0]
assert.Equal(t, "three/underthree.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
func TestNewObject(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject("four/under four.txt")
require.NoError(t, err)
assert.Equal(t, "four/under four.txt", o.Remote())
assert.Equal(t, int64(9), o.Size())
_, ok := o.(*Object)
assert.True(t, ok)
// Test the time is correct on the object
tObj := o.ModTime()
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
}
func TestOpen(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject("four/under four.txt")
require.NoError(t, err)
// Test normal read
fd, err := o.Open()
require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "beetroot\n", string(data))
// Test with range request
fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
func TestMimeType(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject("four/under four.txt")
require.NoError(t, err)
do, ok := o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
}
func TestIsAFileRoot(t *testing.T) {
tidy := prepareServer(t)
defer tidy()
f, err := NewFs(remoteName, "one%.txt")
assert.Equal(t, err, fs.ErrorIsFile)
testListRoot(t, f)
}
func TestIsAFileSubDir(t *testing.T) {
tidy := prepareServer(t)
defer tidy()
f, err := NewFs(remoteName, "three/underthree.txt")
assert.Equal(t, err, fs.ErrorIsFile)
entries, err := f.List("")
require.NoError(t, err)
sort.Sort(entries)
assert.Equal(t, 1, len(entries))
e := entries[0]
assert.Equal(t, "underthree.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
func TestParseName(t *testing.T) {
for i, test := range []struct {
base string
val string
wantErr error
want string
}{
{"http://example.com/", "potato", nil, "potato"},
{"http://example.com/dir/", "potato", nil, "potato"},
{"http://example.com/dir/", "potato?download=true", errFoundQuestionMark, ""},
{"http://example.com/dir/", "../dir/potato", nil, "potato"},
{"http://example.com/dir/", "..", errNotUnderRoot, ""},
{"http://example.com/dir/", "http://example.com/", errNotUnderRoot, ""},
{"http://example.com/dir/", "http://example.com/dir/", errNameIsEmpty, ""},
{"http://example.com/dir/", "http://example.com/dir/potato", nil, "potato"},
{"http://example.com/dir/", "https://example.com/dir/potato", errSchemeMismatch, ""},
{"http://example.com/dir/", "http://notexample.com/dir/potato", errHostMismatch, ""},
{"http://example.com/dir/", "/dir/", errNameIsEmpty, ""},
{"http://example.com/dir/", "/dir/potato", nil, "potato"},
{"http://example.com/dir/", "subdir/potato", errNameContainsSlash, ""},
{"http://example.com/dir/", "With percent %25.txt", nil, "With percent %.txt"},
{"http://example.com/dir/", "With colon :", errURLJoinFailed, ""},
{"http://example.com/dir/", rest.URLPathEscape("With colon :"), nil, "With colon :"},
{"http://example.com/Dungeons%20%26%20Dragons/", "/Dungeons%20&%20Dragons/D%26D%20Basic%20%28Holmes%2C%20B%2C%20X%2C%20BECMI%29/", nil, "D&D Basic (Holmes, B, X, BECMI)/"},
} {
u, err := url.Parse(test.base)
require.NoError(t, err)
got, gotErr := parseName(u, test.val)
what := fmt.Sprintf("test %d base=%q, val=%q", i, test.base, test.val)
assert.Equal(t, test.wantErr, gotErr, what)
assert.Equal(t, test.want, got, what)
}
}
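
The table above pins down parseName's contract: each href is resolved against the page's base URL, and only a name sitting directly under the base directory is accepted. Below is a minimal standalone sketch of that resolution step using only net/url — an illustration of the mechanism, not the backend's actual parseName — with inputs taken from the table:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	base, _ := url.Parse("http://example.com/dir/")
	for _, href := range []string{"potato", "../dir/potato", "/dir/potato", "subdir/potato"} {
		u, err := base.Parse(href) // resolve the (possibly relative) href
		if err != nil {
			fmt.Printf("%q -> error: %v\n", href, err)
			continue
		}
		name := strings.TrimPrefix(u.Path, base.Path)
		if name == u.Path || strings.ContainsRune(name, '/') {
			// not under the base directory, or inside a subdirectory
			fmt.Printf("%q -> rejected\n", href) // cf. errNotUnderRoot / errNameContainsSlash
			continue
		}
		fmt.Printf("%q -> %q\n", href, name)
	}
}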
// Load HTML from the file given and parse it, checking it against the entries passed in
func parseHTML(t *testing.T, name string, base string, want []string) {
in, err := os.Open(filepath.Join(testPath, "index_files", name))
require.NoError(t, err)
defer func() {
require.NoError(t, in.Close())
}()
if base == "" {
base = "http://example.com/"
}
u, err := url.Parse(base)
require.NoError(t, err)
entries, err := parse(u, in)
require.NoError(t, err)
assert.Equal(t, want, entries)
}
func TestParseEmpty(t *testing.T) {
parseHTML(t, "empty.html", "", []string(nil))
}
func TestParseApache(t *testing.T) {
parseHTML(t, "apache.html", "http://example.com/nick/pub/", []string{
"SWIG-embed.tar.gz",
"avi2dvd.pl",
"cambert.exe",
"cambert.gz",
"fedora_demo.gz",
"gchq-challenge/",
"mandelterm/",
"pgp-key.txt",
"pymath/",
"rclone",
"readdir.exe",
"rush_hour_solver_cut_down.py",
"snake-puzzle/",
"stressdisk/",
"timer-test",
"words-to-regexp.pl",
"Now 100% better.mp3",
"Now better.mp3",
})
}
func TestParseMemstore(t *testing.T) {
parseHTML(t, "memstore.html", "", []string{
"test/",
"v1.35/",
"v1.36-01-g503cd84/",
"rclone-beta-latest-freebsd-386.zip",
"rclone-beta-latest-freebsd-amd64.zip",
"rclone-beta-latest-windows-amd64.zip",
})
}
func TestParseNginx(t *testing.T) {
parseHTML(t, "nginx.html", "", []string{
"deltas/",
"objects/",
"refs/",
"state/",
"config",
"summary",
})
}
func TestParseCaddy(t *testing.T) {
parseHTML(t, "caddy.html", "", []string{
"mimetype.zip",
"rclone-delete-empty-dirs.py",
"rclone-show-empty-dirs.py",
"stat-windows-386.zip",
"v1.36-155-gcf29ee8b-team-driveβ/",
"v1.36-156-gca76b3fb-team-driveβ/",
"v1.36-156-ge1f0e0f5-team-driveβ/",
"v1.36-22-g06ea13a-ssh-agentβ/",
})
}


@@ -0,0 +1 @@
beetroot


@@ -0,0 +1 @@
hello


@@ -0,0 +1 @@
rutabaga


@@ -0,0 +1 @@
potato


@@ -0,0 +1,32 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<html>
<head>
<title>Index of /nick/pub</title>
</head>
<body>
<h1>Index of /nick/pub</h1>
<table><tr><th><img src="/icons/blank.gif" alt="[ICO]"></th><th><a href="?C=N;O=D">Name</a></th><th><a href="?C=M;O=A">Last modified</a></th><th><a href="?C=S;O=A">Size</a></th><th><a href="?C=D;O=A">Description</a></th></tr><tr><th colspan="5"><hr></th></tr>
<tr><td valign="top"><img src="/icons/back.gif" alt="[DIR]"></td><td><a href="/nick/">Parent Directory</a></td><td>&nbsp;</td><td align="right"> - </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="SWIG-embed.tar.gz">SWIG-embed.tar.gz</a></td><td align="right">29-Nov-2005 16:27 </td><td align="right">2.3K</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="avi2dvd.pl">avi2dvd.pl</a></td><td align="right">14-Apr-2010 23:07 </td><td align="right"> 17K</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/binary.gif" alt="[ ]"></td><td><a href="cambert.exe">cambert.exe</a></td><td align="right">15-Dec-2006 18:07 </td><td align="right"> 54K</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="cambert.gz">cambert.gz</a></td><td align="right">14-Apr-2010 23:07 </td><td align="right"> 18K</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="fedora_demo.gz">fedora_demo.gz</a></td><td align="right">08-Jun-2007 11:01 </td><td align="right">1.0M</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="gchq-challenge/">gchq-challenge/</a></td><td align="right">24-Dec-2016 15:24 </td><td align="right"> - </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="mandelterm/">mandelterm/</a></td><td align="right">13-Jul-2013 22:22 </td><td align="right"> - </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="pgp-key.txt">pgp-key.txt</a></td><td align="right">14-Apr-2010 23:07 </td><td align="right">400 </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="pymath/">pymath/</a></td><td align="right">24-Dec-2016 15:24 </td><td align="right"> - </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="rclone">rclone</a></td><td align="right">09-May-2017 17:15 </td><td align="right"> 22M</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/binary.gif" alt="[ ]"></td><td><a href="readdir.exe">readdir.exe</a></td><td align="right">21-Oct-2016 14:47 </td><td align="right">1.6M</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="rush_hour_solver_cut_down.py">rush_hour_solver_cut_down.py</a></td><td align="right">23-Jul-2009 11:44 </td><td align="right"> 14K</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="snake-puzzle/">snake-puzzle/</a></td><td align="right">25-Sep-2016 20:56 </td><td align="right"> - </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="stressdisk/">stressdisk/</a></td><td align="right">08-Nov-2016 14:25 </td><td align="right"> - </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td>&nbsp;</td></tr>
<tr><th colspan="5"><hr></th></tr>
<!-- some extras from https://github.com/ncw/rclone/issues/1573 -->
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr>
</table>
</body></html>


@@ -0,0 +1,378 @@
<!DOCTYPE html>
<html>
<head>
<title>/</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style>
* { padding: 0; margin: 0; }
body {
font-family: sans-serif;
text-rendering: optimizespeed;
}
a {
color: #006ed3;
text-decoration: none;
}
a:hover,
h1 a:hover {
color: #319cff;
}
header,
#summary {
padding-left: 5%;
padding-right: 5%;
}
th:first-child,
td:first-child {
padding-left: 5%;
}
th:last-child,
td:last-child {
padding-right: 5%;
}
header {
padding-top: 25px;
padding-bottom: 15px;
background-color: #f2f2f2;
}
h1 {
font-size: 20px;
font-weight: normal;
white-space: nowrap;
overflow-x: hidden;
text-overflow: ellipsis;
}
h1 a {
color: inherit;
}
h1 a:hover {
text-decoration: underline;
}
main {
display: block;
}
.meta {
font-size: 12px;
font-family: Verdana, sans-serif;
border-bottom: 1px solid #9C9C9C;
padding-top: 10px;
padding-bottom: 10px;
}
.meta-item {
margin-right: 1em;
}
#filter {
padding: 4px;
border: 1px solid #CCC;
}
table {
width: 100%;
border-collapse: collapse;
}
tr {
border-bottom: 1px dashed #dadada;
}
tbody tr:hover {
background-color: #ffffec;
}
th,
td {
text-align: left;
padding: 10px 0;
}
th {
padding-top: 15px;
padding-bottom: 15px;
font-size: 16px;
white-space: nowrap;
}
th a {
color: black;
}
th svg {
vertical-align: middle;
}
td {
font-size: 14px;
}
td:first-child {
width: 50%;
}
th:last-child,
td:last-child {
text-align: right;
}
td:first-child svg {
position: absolute;
}
td .name,
td .goup {
margin-left: 1.75em;
word-break: break-all;
overflow-wrap: break-word;
white-space: pre-wrap;
}
footer {
padding: 40px 20px;
font-size: 12px;
text-align: center;
}
@media (max-width: 600px) {
.hideable {
display: none;
}
td:first-child {
width: auto;
}
th:nth-child(2),
td:nth-child(2) {
padding-right: 5%;
text-align: right;
}
}
</style>
</head>
<body>
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" height="0" width="0" style="position: absolute;">
<defs>
<!-- Folder -->
<linearGradient id="f" y2="640" gradientUnits="userSpaceOnUse" x2="244.84" gradientTransform="matrix(.97319 0 0 1.0135 -.50695 -13.679)" y1="415.75" x1="244.84">
<stop stop-color="#b3ddfd" offset="0"/>
<stop stop-color="#69c" offset="1"/>
</linearGradient>
<linearGradient id="e" y2="571.06" gradientUnits="userSpaceOnUse" x2="238.03" gradientTransform="translate(0,2)" y1="346.05" x1="236.26">
<stop stop-color="#ace" offset="0"/>
<stop stop-color="#369" offset="1"/>
</linearGradient>
<g id="folder" transform="translate(-266.06 -193.36)">
<g transform="matrix(.066019 0 0 .066019 264.2 170.93)">
<g transform="matrix(1.4738 0 0 1.4738 -52.053 -166.93)">
<path fill="#69c" d="m98.424 343.78c-11.08 0-20 8.92-20 20v48.5 33.719 105.06c0 11.08 8.92 20 20 20h279.22c11.08 0 20-8.92 20-20v-138.78c0-11.08-8.92-20-20-20h-117.12c-7.5478-1.1844-9.7958-6.8483-10.375-11.312v-5.625-11.562c0-11.08-8.92-20-20-20h-131.72z"/>
<rect rx="12.885" ry="12.199" height="227.28" width="366.69" y="409.69" x="54.428" fill="#369"/>
<path fill="url(#e)" d="m98.424 345.78c-11.08 0-20 8.92-20 20v48.5 33.719 105.06c0 11.08 8.92 20 20 20h279.22c11.08 0 20-8.92 20-20v-138.78c0-11.08-8.92-20-20-20h-117.12c-7.5478-1.1844-9.7958-6.8483-10.375-11.312v-5.625-11.562c0-11.08-8.92-20-20-20h-131.72z"/>
<rect rx="12.885" ry="12.199" height="227.28" width="366.69" y="407.69" x="54.428" fill="url(#f)"/>
</g>
</g>
</g>
<!-- File -->
<linearGradient id="a">
<stop stop-color="#cbcbcb" offset="0"/>
<stop stop-color="#f0f0f0" offset=".34923"/>
<stop stop-color="#e2e2e2" offset="1"/>
</linearGradient>
<linearGradient id="d" y2="686.15" xlink:href="#a" gradientUnits="userSpaceOnUse" y1="207.83" gradientTransform="matrix(.28346 0 0 .31053 -608.52 485.11)" x2="380.1" x1="749.25"/>
<linearGradient id="c" y2="287.74" xlink:href="#a" gradientUnits="userSpaceOnUse" y1="169.44" gradientTransform="matrix(.28342 0 0 .31057 -608.52 485.11)" x2="622.33" x1="741.64"/>
<linearGradient id="b" y2="418.54" gradientUnits="userSpaceOnUse" y1="236.13" gradientTransform="matrix(.29343 0 0 .29999 -608.52 485.11)" x2="330.88" x1="687.96">
<stop stop-color="#fff" offset="0"/>
<stop stop-color="#fff" stop-opacity="0" offset="1"/>
</linearGradient>
<g id="file" transform="translate(-278.15 -216.59)">
<g fill-rule="evenodd" transform="matrix(.19775 0 0 .19775 381.05 112.68)">
<path d="m-520.17 525.5v36.739 36.739 36.739 36.739h33.528 33.528 33.528 33.528v-36.739-36.739-36.739l-33.528-36.739h-33.528-33.528-33.528z" stroke-opacity=".36478" stroke-width=".42649" fill="#fff"/>
<g>
<path d="m-520.11 525.68v36.739 36.739 36.739 36.739h33.528 33.528 33.528 33.528v-36.739-36.739-36.739l-33.528-36.739h-33.528-33.528-33.528z" stroke-opacity=".36478" stroke="#000" stroke-width=".42649" fill="url(#d)"/>
<path d="m-386 562.42c-10.108-2.9925-23.206-2.5682-33.101-0.86253 1.7084-10.962 1.922-24.701-0.4271-35.877l33.528 36.739z" stroke-width=".95407pt" fill="url(#c)"/>
<path d="m-519.13 537-0.60402 134.7h131.68l0.0755-33.296c-2.9446 1.1325-32.692-40.998-70.141-39.186-37.483 1.8137-27.785-56.777-61.006-62.214z" stroke-width="1pt" fill="url(#b)"/>
</g>
</g>
</g>
<!-- Up arrow -->
<g id="up-arrow" transform="translate(-279.22 -208.12)">
<path transform="matrix(.22413 0 0 .12089 335.67 164.35)" stroke-width="0" d="m-194.17 412.01h-28.827-28.827l14.414-24.965 14.414-24.965 14.414 24.965z"/>
</g>
<!-- Down arrow -->
<g id="down-arrow" transform="translate(-279.22 -208.12)">
<path transform="matrix(.22413 0 0 -.12089 335.67 257.93)" stroke-width="0" d="m-194.17 412.01h-28.827-28.827l14.414-24.965 14.414-24.965 14.414 24.965z"/>
</g>
</defs>
</svg>
<header>
<h1>
<a href="/">/</a>
</h1>
</header>
<main>
<div class="meta">
<div id="summary">
<span class="meta-item"><b>4</b> directories</span>
<span class="meta-item"><b>4</b> files</span>
<span class="meta-item"><input type="text" placeholder="filter" id="filter" onkeyup='filter()'></span>
</div>
</div>
<div class="listing">
<table aria-describedby="summary">
<thead>
<tr>
<th>
<a href="?sort=name&order=desc">Name <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a>
</th>
<th>
<a href="?sort=size&order=asc">Size</a>
</th>
<th class="hideable">
<a href="?sort=time&order=asc">Modified</a>
</th>
</tr>
</thead>
<tbody>
<tr class="file">
<td>
<a href="./mimetype.zip">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
<span class="name">mimetype.zip</span>
</a>
</td>
<td data-order="783696">765 KiB</td>
<td class="hideable"><time datetime="2016-04-04T15:36:49Z">04/04/2016 03:36:49 PM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./rclone-delete-empty-dirs.py">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
<span class="name">rclone-delete-empty-dirs.py</span>
</a>
</td>
<td data-order="1271">1.2 KiB</td>
<td class="hideable"><time datetime="2016-10-26T16:05:08Z">10/26/2016 04:05:08 PM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./rclone-show-empty-dirs.py">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
<span class="name">rclone-show-empty-dirs.py</span>
</a>
</td>
<td data-order="868">868 B</td>
<td class="hideable"><time datetime="2016-10-26T09:29:34Z">10/26/2016 09:29:34 AM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./stat-windows-386.zip">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
<span class="name">stat-windows-386.zip</span>
</a>
</td>
<td data-order="704960">688 KiB</td>
<td class="hideable"><time datetime="2016-08-14T20:44:58Z">08/14/2016 08:44:58 PM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./v1.36-155-gcf29ee8b-team-drive%CE%B2/">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
<span class="name">v1.36-155-gcf29ee8b-team-driveβ</span>
</a>
</td>
<td data-order="-1">&mdash;</td>
<td class="hideable"><time datetime="2017-06-01T21:28:09Z">06/01/2017 09:28:09 PM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./v1.36-156-gca76b3fb-team-drive%CE%B2/">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
<span class="name">v1.36-156-gca76b3fb-team-driveβ</span>
</a>
</td>
<td data-order="-1">&mdash;</td>
<td class="hideable"><time datetime="2017-06-04T08:53:04Z">06/04/2017 08:53:04 AM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./v1.36-156-ge1f0e0f5-team-drive%CE%B2/">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
<span class="name">v1.36-156-ge1f0e0f5-team-driveβ</span>
</a>
</td>
<td data-order="-1">&mdash;</td>
<td class="hideable"><time datetime="2017-06-02T10:38:05Z">06/02/2017 10:38:05 AM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./v1.36-22-g06ea13a-ssh-agent%CE%B2/">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
<span class="name">v1.36-22-g06ea13a-ssh-agentβ</span>
</a>
</td>
<td data-order="-1">&mdash;</td>
<td class="hideable"><time datetime="2017-04-10T13:58:02Z">04/10/2017 01:58:02 PM +00:00</time></td>
</tr>
</tbody>
</table>
</div>
</main>
<footer>
Served with <a rel="noopener noreferrer" href="https://caddyserver.com">Caddy</a>
</footer>
<script>
var filterEl = document.getElementById('filter');
function filter() {
var q = filterEl.value.trim().toLowerCase();
var elems = document.querySelectorAll('tr.file');
elems.forEach(function(el) {
if (!q) {
el.style.display = '';
return;
}
var nameEl = el.querySelector('.name');
var nameVal = nameEl.textContent.trim().toLowerCase();
if (nameVal.indexOf(q) !== -1) {
el.style.display = '';
} else {
el.style.display = 'none';
}
});
}
function localizeDatetime(e, index, ar) {
if (e.textContent === undefined) {
return;
}
var d = new Date(e.getAttribute('datetime'));
if (isNaN(d)) {
d = new Date(e.textContent);
if (isNaN(d)) {
return;
}
}
e.textContent = d.toLocaleString();
}
var timeList = Array.prototype.slice.call(document.getElementsByTagName("time"));
timeList.forEach(localizeDatetime);
</script>
</body>
</html>


@@ -0,0 +1,77 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta name="robots" content="noindex" />
<title>Index of /</title>
</head>
<body>
<div id="content">
<h1>Index of /</h1>
<table>
<thead>
<tr>
<th>Name</th>
<th>Type</th>
<th>Size</th>
<th>Last modified</th>
<th>MD5</th>
</tr>
</thead>
<tbody>
<tr>
<td><a href="test/">test/</a></td>
<td>application/directory</td>
<td>0 bytes</td>
<td>-</td>
<td>-</td>
</tr>
<tr>
<td><a href="v1.35/">v1.35/</a></td>
<td>application/directory</td>
<td>0 bytes</td>
<td>-</td>
<td>-</td>
</tr>
<tr>
<td><a href="v1.36-01-g503cd84/">v1.36-01-g503cd84/</a></td>
<td>application/directory</td>
<td>0 bytes</td>
<td>-</td>
<td>-</td>
</tr>
<tr>
<td><a href="rclone-beta-latest-freebsd-386.zip">rclone-beta-latest-freebsd-386.zip</a></td>
<td>application/zip</td>
<td>4.6 MB</td>
<td>2017-06-19 14:04:52</td>
<td>e747003c69c81e675f206a715264bfa8</td>
</tr>
<tr>
<td><a href="rclone-beta-latest-freebsd-amd64.zip">rclone-beta-latest-freebsd-amd64.zip</a></td>
<td>application/zip</td>
<td>5.0 MB</td>
<td>2017-06-19 14:04:53</td>
<td>ff30b5e9bf2863a2373069142e6f2b7f</td>
</tr>
<tr>
<td><a href="rclone-beta-latest-windows-amd64.zip">rclone-beta-latest-windows-amd64.zip</a></td>
<td>application/x-zip-compressed</td>
<td>4.9 MB</td>
<td>2017-06-19 13:56:02</td>
<td>851a5547a0495cbbd94cbc90a80ed6f5</td>
</tr>
</tbody>
</table>
<p class="right"><a href="http://www.memset.com/"><img src="http://www.memset.com/images/Memset_logo_2010.gif" alt="Memset Ltd." /></a></p>
</div>
</body>
</html>


@@ -0,0 +1,12 @@
<html>
<head><title>Index of /atomic/fedora/</title></head>
<body bgcolor="white">
<h1>Index of /atomic/fedora/</h1><hr><pre><a href="../">../</a>
<a href="deltas/">deltas/</a> 04-May-2017 21:37 -
<a href="objects/">objects/</a> 04-May-2017 20:44 -
<a href="refs/">refs/</a> 04-May-2017 20:42 -
<a href="state/">state/</a> 04-May-2017 21:36 -
<a href="config">config</a> 04-May-2017 20:42 118
<a href="summary">summary</a> 04-May-2017 21:36 806
</pre><hr></body>
</html>

backend/hubic/auth.go Normal file

@@ -0,0 +1,54 @@
package hubic
import (
"net/http"
"github.com/ncw/swift"
)
// auth is an authenticator for swift
type auth struct {
f *Fs
}
// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
return &auth{
f: f,
}
}
// Request constructs a http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (*http.Request, error) {
err := a.f.getCredentials()
if err != nil {
return nil, err
}
return nil, nil
}
// Response parses the result of an http request
func (a *auth) Response(resp *http.Response) error {
return nil
}
// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
return a.f.credentials.Endpoint
}
// The access token
func (a *auth) Token() string {
return a.f.credentials.Token
}
// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
return ""
}
// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)
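
The trailing var _ line is Go's usual compile-time assertion that *auth satisfies swift.Authenticator: if the interface grows a method, the build breaks here instead of at a distant call site. The same idiom in miniature, with a hypothetical type used purely for illustration:

package main

import "io"

type nopWriter struct{}

func (nopWriter) Write(p []byte) (int, error) { return len(p), nil }

// fails to compile if nopWriter ever stops satisfying io.Writer
var _ io.Writer = nopWriter{}

func main() {}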


@@ -13,9 +13,12 @@ import (
"net/http"
"time"
"github.com/ncw/rclone/backend/swift"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/swift"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/lib/oauthutil"
swiftLib "github.com/ncw/swift"
"github.com/pkg/errors"
"golang.org/x/oauth2"
@@ -38,7 +41,7 @@ var (
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
- ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
+ ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
@@ -56,10 +59,10 @@ func init() {
}
},
Options: []fs.Option{{
- Name: fs.ConfigClientID,
+ Name: config.ConfigClientID,
Help: "Hubic Client Id - leave blank normally.",
}, {
- Name: fs.ConfigClientSecret,
+ Name: config.ConfigClientSecret,
Help: "Hubic Client Secret - leave blank normally.",
}},
})
@@ -157,7 +160,7 @@ func NewFs(name, root string) (fs.Fs, error) {
Auth: newAuth(f),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
- Transport: fs.Config.Transport(),
+ Transport: fshttp.NewTransport(fs.Config),
}
err = c.Authenticate()
if err != nil {
@@ -165,7 +168,7 @@ func NewFs(name, root string) (fs.Fs, error) {
}
// Make inner swift Fs from the connection
- swiftFs, err := swift.NewFsWithConnection(name, root, c)
+ swiftFs, err := swift.NewFsWithConnection(name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}


@@ -0,0 +1,17 @@
// Test Hubic filesystem interface
package hubic_test
import (
"testing"
"github.com/ncw/rclone/backend/hubic"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
})
}


@@ -0,0 +1,29 @@
// +build darwin dragonfly freebsd linux
package local
import (
"syscall"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s)
if err != nil {
return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize)
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
Free: fs.NewUsageValue(bs * int64(s.Bavail)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// check interface
var _ fs.Abouter = &Fs{}
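
About maps one statfs call onto the three usage figures: Total is the block size times all blocks, Used is total minus free blocks, and Free deliberately reports Bavail (blocks an unprivileged user may write) rather than Bfree (which also counts root-reserved blocks). A standalone sketch of the same arithmetic, assuming Linux and using "/" purely as an example path:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var s syscall.Statfs_t
	if err := syscall.Statfs("/", &s); err != nil {
		panic(err)
	}
	bs := int64(s.Bsize)
	fmt.Println("total:", bs*int64(s.Blocks))         // capacity in bytes
	fmt.Println("used: ", bs*int64(s.Blocks-s.Bfree)) // allocated bytes
	fmt.Println("free: ", bs*int64(s.Bavail))         // writable by unprivileged users
}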


@@ -0,0 +1,36 @@
// +build windows
package local
import (
"syscall"
"unsafe"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
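// (GetDiskFreeSpaceExW fills in three 64 bit byte counts: free bytes
// available to the caller, total bytes, and total free bytes - hence
// the three int64 out parameters below)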
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
var available, total, free int64
_, _, e1 := getFreeDiskSpace.Call(
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != syscall.Errno(0) {
return nil, errors.Wrap(e1, "failed to read disk usage")
}
usage := &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used
Used: fs.NewUsageValue(total - free), // bytes in use
Free: fs.NewUsageValue(available), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// check interface
var _ fs.Abouter = &Fs{}

backend/local/local.go Normal file

@@ -0,0 +1,969 @@
// Package local provides a filesystem interface
package local
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"google.golang.org/appengine/log"
)
var (
followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
skipSymlinks = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
noUTFNorm = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
noCheckUpdated = flags.BoolP("local-no-check-updated", "", false, "Don't check to see if the files change during upload")
)
// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "local",
Description: "Local Disk",
NewFs: NewFs,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Optional: true,
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names",
}},
}},
}
fs.Register(fsi)
}
// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // guards reading the precision exactly once
precision time.Duration // precision of local filesystem
wmu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
nounc bool // Skip UNC conversion on Windows
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
dirNames *mapper // directory name mapping
objectHashesMu sync.Mutex // global lock for Object.hashes
}
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
}
// ------------------------------------------------------------
// NewFs constructs an Fs from the path
func NewFs(name, root string) (fs.Fs, error) {
var err error
if *noUTFNorm {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
}
nounc := config.FileGet(name, "nounc")
f := &Fs{
name: name,
warned: make(map[string]struct{}),
nounc: nounc == "true",
dev: devUnset,
lstat: os.Lstat,
dirNames: newMapper(),
}
f.root = f.cleanPath(root)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
}).Fill(f)
if *followSymlinks {
f.lstat = os.Stat
}
// Check to see if this points to a file
fi, err := f.lstat(f.root)
if err == nil {
f.dev = readDevice(fi)
}
if err == nil && fi.Mode().IsRegular() {
// It is a file, so use the parent as the root
f.root, _ = getDirFile(f.root)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Local file system at %s", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// caseInsensitive returns whether the remote is case insensitive or not
func (f *Fs) caseInsensitive() bool {
// FIXME not entirely accurate since you can have case
// sensitive Fses on darwin and case insensitive Fses on linux.
// Should probably check but that would involve creating a
// file in the remote to be most accurate which probably isn't
// desirable.
return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
}
// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
if dstPath == "" {
dstPath = f.cleanPath(filepath.Join(f.root, remote))
}
remote = f.cleanRemote(remote)
return &Object{
fs: f,
remote: remote,
path: dstPath,
}
}
// Return an Object from a path
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Object, error) {
o := f.newObject(remote, dstPath)
if info != nil {
o.setMetadata(info)
} else {
err := o.lstat()
if err != nil {
if os.IsNotExist(err) {
return nil, fs.ErrorObjectNotFound
}
if os.IsPermission(err) {
return nil, fs.ErrorPermissionDenied
}
return nil, err
}
}
if o.mode.IsDir() {
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, "", nil)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
dir = f.dirNames.Load(dir)
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
remote := f.cleanRemote(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
return nil, fs.ErrorDirNotFound
}
fd, err := os.Open(fsDirPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to open directory %q", dir)
}
defer func() {
cerr := fd.Close()
if cerr != nil && err == nil {
err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
}
}()
for {
fis, err := fd.Readdir(1024)
if err == io.EOF && len(fis) == 0 {
break
}
if err != nil {
return nil, errors.Wrapf(err, "failed to read directory %q", dir)
}
for _, fi := range fis {
name := fi.Name()
mode := fi.Mode()
newRemote := path.Join(remote, name)
newPath := filepath.Join(fsDirPath, name)
// Follow symlinks if required
if *followSymlinks && (mode&os.ModeSymlink) != 0 {
fi, err = os.Stat(newPath)
if err != nil {
return nil, err
}
mode = fi.Mode()
}
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under Windows which
// are kind of a souped-up symlink. Unix doesn't have directories which are symlinks.
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi) {
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
entries = append(entries, d)
}
} else {
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
if err != nil {
return nil, err
}
if fso.Storable() {
entries = append(entries, fso)
}
}
}
}
return entries, nil
}
// cleanRemote makes string a valid UTF-8 string for remote strings.
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
// It also normalises the UTF-8 and converts the slashes if necessary.
func (f *Fs) cleanRemote(name string) string {
if !utf8.ValidString(name) {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
name = string([]rune(name))
}
name = filepath.ToSlash(name)
return name
}
// mapper maps raw to cleaned directory names
type mapper struct {
mu sync.RWMutex // mutex to protect the below
m map[string]string // map of un-normalised directory names
}
func newMapper() *mapper {
return &mapper{
m: make(map[string]string),
}
}
// Load maps a cleaned directory name back to the original local name
// (reverses the mapping recorded by Save from cleanRemote)
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Load(in string) string {
m.mu.RLock()
out, ok := m.m[in]
m.mu.RUnlock()
if ok {
return out
}
return in
}
// Cleans a directory name recording if it needed to be altered
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Save(in, out string) string {
if in != out {
m.mu.Lock()
m.m[out] = in
m.mu.Unlock()
}
return out
}
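// For example (hypothetical names): Save("caf\xe9", "café") records the
// mapping and returns "café"; a later Load("café") hands back the raw
// "caf\xe9" so the original OS path can be reconstructed.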
// Put the Object to the local filesystem
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
// Temporary Object under construction - info filled in by Update()
o := f.newObject(remote, "")
err := o.Update(in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
root := f.cleanPath(filepath.Join(f.root, dir))
err := os.MkdirAll(root, 0777)
if err != nil {
return err
}
if dir == "" {
fi, err := f.lstat(root)
if err != nil {
return err
}
f.dev = readDevice(fi)
}
return nil
}
// Rmdir removes the directory
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(dir string) error {
root := f.cleanPath(filepath.Join(f.root, dir))
return os.Remove(root)
}
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
f.precisionOk.Do(func() {
f.precision = f.readPrecision()
})
return f.precision
}
// Read the precision
func (f *Fs) readPrecision() (precision time.Duration) {
// Default precision of 1s
precision = time.Second
// Create temporary file and test it
fd, err := ioutil.TempFile("", "rclone")
if err != nil {
// If failed return 1s
// fmt.Println("Failed to create temp file", err)
return time.Second
}
path := fd.Name()
// fmt.Println("Created temp file", path)
err = fd.Close()
if err != nil {
return time.Second
}
// Delete it on return
defer func() {
// fmt.Println("Remove temp file")
_ = os.Remove(path) // ignore error
}()
// Find the minimum duration we can detect
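// (probing 1ns, 10ns, ... up to 1s: write an mtime carrying that
// sub-second component and accept the first duration that survives a
// round trip through Chtimes/Stat unchanged)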
for duration := time.Duration(1); duration < time.Second; duration *= 10 {
// Current time with delta
t := time.Unix(time.Now().Unix(), int64(duration))
err := os.Chtimes(path, t, t)
if err != nil {
// fmt.Println("Failed to Chtimes", err)
break
}
// Read the actual time back
fi, err := os.Stat(path)
if err != nil {
// fmt.Println("Failed to Stat", err)
break
}
// If it matches - have found the precision
// fmt.Println("compare", fi.ModTime(), t)
if fi.ModTime().Equal(t) {
// fmt.Println("Precision detected as", duration)
return duration
}
}
return
}
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
fi, err := f.lstat(f.root)
if err != nil {
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", f.root)
}
return os.RemoveAll(f.root)
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Temporary Object under construction
dstObj := f.newObject(remote, "")
// Check it is a file if it exists
err := dstObj.lstat()
if os.IsNotExist(err) {
// OK
} else if err != nil {
return nil, err
} else if !dstObj.mode.IsRegular() {
// It isn't a file
return nil, errors.New("can't move file onto non-file")
}
// Create destination
err = dstObj.mkdirAll()
if err != nil {
return nil, err
}
// Do the move
err = os.Rename(srcObj.path, dstObj.path)
if os.IsNotExist(err) {
// race condition, source was deleted in the meantime
return nil, err
} else if os.IsPermission(err) {
// not enough rights to write to dst
return nil, err
} else if err != nil {
// not quite clear, but probably trying to move a file across file system
// boundaries. Copying might still work.
fs.Debugf(src, "Can't move: %v: trying copy", err)
return nil, fs.ErrorCantMove
}
// Update the info
err = dstObj.lstat()
if err != nil {
return nil, err
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := f.cleanPath(filepath.Join(srcFs.root, srcRemote))
dstPath := f.cleanPath(filepath.Join(f.root, dstRemote))
// Check if destination exists
_, err := os.Lstat(dstPath)
if !os.IsNotExist(err) {
return fs.ErrorDirExists
}
// Create parent of destination
dstParentPath, _ := getDirFile(dstPath)
err = os.MkdirAll(dstParentPath, 0777)
if err != nil {
return err
}
// Do the move
err = os.Rename(srcPath, dstPath)
if os.IsNotExist(err) {
// race condition, source was deleted in the meantime
return err
} else if os.IsPermission(err) {
// not enough rights to write to dst
return err
} else if err != nil {
// not quite clear, but probably trying to move a directory across file system
// boundaries. Copying might still work.
fs.Debugf(src, "Can't move dir: %v: trying copy", err)
return fs.ErrorCantDirMove
}
return nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Supported
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(r hash.Type) (string, error) {
// Check that the underlying file hasn't changed
oldtime := o.modTime
oldsize := o.size
err := o.lstat()
if err != nil {
return "", errors.Wrap(err, "hash: failed to stat")
}
o.fs.objectHashesMu.Lock()
hashes := o.hashes
o.fs.objectHashesMu.Unlock()
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
in, err := os.Open(o.path)
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
hashes, err = hash.Stream(in)
closeErr := in.Close()
if err != nil {
return "", errors.Wrap(err, "hash: failed to read")
}
if closeErr != nil {
return "", errors.Wrap(closeErr, "hash: failed to close")
}
o.fs.objectHashesMu.Lock()
o.hashes = hashes
o.fs.objectHashesMu.Unlock()
}
return hashes[r], nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime() time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
return err
}
// Re-read metadata
return o.lstat()
}
// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
// Check for control characters in the remote name and show non storable
for _, c := range o.Remote() {
if c >= 0x00 && c < 0x20 || c == 0x7F {
fs.Logf(o.fs, "Can't store file with control characters: %q", o.Remote())
return false
}
}
mode := o.mode
if mode&os.ModeSymlink != 0 {
if !*skipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
}
return false
} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
fs.Logf(o, "Can't transfer non file/directory")
return false
} else if mode&os.ModeDir != 0 {
// fs.Debugf(o, "Skipping directory")
return false
}
return true
}
// localOpenFile wraps an io.ReadCloser and updates the hashes of the
// object that is read
type localOpenFile struct {
o *Object // object that is open
in io.ReadCloser // handle we are wrapping
hash *hash.MultiHasher // currently accumulating hashes
fd *os.File // file object reference
}
// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
if !*noCheckUpdated {
// Check if file has the same size and modTime
fi, err := file.fd.Stat()
if err != nil {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
if file.o.size != fi.Size() {
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
}
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
}
}
n, err = file.in.Read(p)
if n > 0 {
// Hash routines never return an error
_, _ = file.hash.Write(p[:n])
}
return
}
// Close the object and update the hashes
func (file *localOpenFile) Close() (err error) {
err = file.in.Close()
if err == nil {
if file.hash.Size() == file.o.Size() {
file.o.fs.objectHashesMu.Lock()
file.o.hashes = file.hash.Sums()
file.o.fs.objectHashesMu.Unlock()
}
}
return err
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.size)
case *fs.HashesOption:
hashes = x.Hashes
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
fd, err := os.Open(o.path)
if err != nil {
return
}
wrappedFd := readers.NewLimitedReadCloser(fd, limit)
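// (a limit of -1 means read to EOF; otherwise reads stop after limit bytes)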
if offset != 0 {
// seek the object
_, err = fd.Seek(offset, io.SeekStart)
// don't attempt to make checksums
return wrappedFd, err
}
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return nil, err
}
// Update the hashes as we go along
in = &localOpenFile{
o: o,
in: wrappedFd,
hash: hash,
fd: fd,
}
return in, nil
}
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
dir, _ := getDirFile(o.path)
return os.MkdirAll(dir, 0777)
}
// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.HashesOption:
hashes = x.Hashes
}
}
err := o.mkdirAll()
if err != nil {
return err
}
out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
// Calculate the hash of the object we are reading as we go along
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return err
}
in = io.TeeReader(in, hash)
_, err = io.Copy(out, in)
closeErr := out.Close()
if err == nil {
err = closeErr
}
if err != nil {
fs.Logf(o, "Removing partially written file on error: %v", err)
if removeErr := os.Remove(o.path); removeErr != nil {
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
}
return err
}
// All successful so update the hashes
o.fs.objectHashesMu.Lock()
o.hashes = hash.Sums()
o.fs.objectHashesMu.Unlock()
// Set the mtime
err = o.SetModTime(src.ModTime())
if err != nil {
return err
}
// ReRead info now that we have finished
return o.lstat()
}
// setMetadata sets the file info from the os.FileInfo passed in
func (o *Object) setMetadata(info os.FileInfo) {
// Don't overwrite the info if we don't need to
// this avoids upsetting the race detector
if o.size != info.Size() {
o.size = info.Size()
}
if !o.modTime.Equal(info.ModTime()) {
o.modTime = info.ModTime()
}
if o.mode != info.Mode() {
o.mode = info.Mode()
}
}
// Stat a Object into info
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)
if err == nil {
o.setMetadata(info)
}
return err
}
// Remove an object
func (o *Object) Remove() error {
return remove(o.path)
}
// Return the directory and file from an OS path. Assumes
// os.PathSeparator is used.
func getDirFile(s string) (string, string) {
i := strings.LastIndex(s, string(os.PathSeparator))
dir, file := s[:i], s[i+1:]
if dir == "" {
dir = string(os.PathSeparator)
}
return dir, file
}
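// e.g. getDirFile("/usr/local/bin") -> ("/usr/local", "bin") and
// getDirFile("/bin") -> ("/", "bin")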
// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
if s == "" {
return s
}
s = filepath.Clean(s)
if runtime.GOOS == "windows" {
s = strings.Replace(s, `/`, `\`, -1)
}
return s
}
// cleanPath cleans and makes absolute the path passed in and returns
// an OS path.
//
// The input might be in OS form or rclone form or a mixture, but the
// output is in OS form.
//
// On Windows it also converts the path to UNC form and replaces any
// characters Windows can't deal with (see cleanWindowsName).
func (f *Fs) cleanPath(s string) string {
s = cleanPathFragment(s)
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
if !f.nounc {
// Convert to UNC
s = uncPath(s)
}
s = cleanWindowsName(f, s)
} else {
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
}
return s
}
// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(s string) string {
// UNC can NOT use "/", so convert all to "\"
s = strings.Replace(s, `/`, `\`, -1)
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(s, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(s, `\\?\`) {
return s
}
// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
}
if isAbsWinDrive.MatchString(s) {
return `\\?\` + s
}
return s
}
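// By way of example:
//
//	C:\work\file.txt        -> \\?\C:\work\file.txt
//	\\server\share\file.txt -> \\?\UNC\server\share\file.txt
//	\\?\C:\already\long     -> unchanged (already a long path)
//	relative\path           -> unchanged (no drive prefix)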
// cleanWindowsName will clean invalid Windows characters replacing them with _
func cleanWindowsName(f *Fs, name string) string {
original := name
var name2 string
if strings.HasPrefix(name, `\\?\`) {
name2 = `\\?\`
name = strings.TrimPrefix(name, `\\?\`)
}
if strings.HasPrefix(name, `//?/`) {
name2 = `//?/`
name = strings.TrimPrefix(name, `//?/`)
}
// Colon is allowed as part of a drive name X:\
colonAt := strings.Index(name, ":")
if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
// Copy to name2, which is unfiltered
name2 += name[0 : colonAt+1]
name = name[colonAt+1:]
}
name2 += strings.Map(func(r rune) rune {
switch r {
case '<', '>', '"', '|', '?', '*', ':':
return '_'
}
return r
}, name)
if name2 != original && f != nil {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid characters in %q to %q", name, name2)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
}
return name2
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Object = &Object{}
)


@@ -0,0 +1,78 @@
package local
import (
"os"
"path"
"testing"
"time"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestMain drives the tests
func TestMain(m *testing.M) {
fstest.TestMain(m)
}
func TestMapper(t *testing.T) {
m := newMapper()
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "potato", m.Save("potato", "potato"))
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "-r'áö", m.Save("-r?'a´o¨", "-r'áö"))
assert.Equal(t, m.m, map[string]string{
"-r'áö": "-r?'a´o¨",
})
assert.Equal(t, "potato", m.Load("potato"))
assert.Equal(t, "-r?'a´o¨", m.Load("-r'áö"))
}
// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())
fd, err := os.Open(path.Join(r.LocalName, filePath))
if err != nil {
t.Fatalf("failed opening file %q: %v", filePath, err)
}
fi, err := fd.Stat()
require.NoError(t, err)
o := &Object{size: fi.Size(), modTime: fi.ModTime()}
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
hash, err := hash.NewMultiHasherTypes(hash.Supported)
require.NoError(t, err)
in := localOpenFile{
o: o,
in: wrappedFd,
hash: hash,
fd: fd,
}
buf := make([]byte, 1)
_, err = in.Read(buf)
require.NoError(t, err)
r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.Errorf(t, err, "can't copy - source file is being updated")
// turn the checking off and try again
*noCheckUpdated = true
defer func() {
*noCheckUpdated = false
}()
r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.NoError(t, err)
}


@@ -0,0 +1,17 @@
// Test Local filesystem interface
package local_test
import (
"testing"
"github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "",
NilObject: (*local.Object)(nil),
})
}


@@ -9,10 +9,11 @@ import (
"syscall"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/flags"
)
var (
- oneFileSystem = fs.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
+ oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
)
// readDevice turns a valid os.FileInfo into a device number,


@@ -0,0 +1,10 @@
//+build !windows
package local
import "os"
// Removes name, retrying on a sharing violation
func remove(name string) error {
return os.Remove(name)
}


@@ -0,0 +1,50 @@
package local
import (
"io/ioutil"
"os"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check we can remove an open file
func TestRemove(t *testing.T) {
fd, err := ioutil.TempFile("", "rclone-remove-test")
require.NoError(t, err)
name := fd.Name()
defer func() {
_ = os.Remove(name)
}()
exists := func() bool {
_, err := os.Stat(name)
if err == nil {
return true
} else if os.IsNotExist(err) {
return false
}
require.NoError(t, err)
return false
}
assert.True(t, exists())
// close the file in the background
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(250 * time.Millisecond)
require.NoError(t, fd.Close())
}()
// delete the open file
err = remove(name)
require.NoError(t, err)
// check it no longer exists
assert.False(t, exists())
// wait for background close
wg.Wait()
}

Some files were not shown because too many files have changed in this diff.